hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
02509a03dc03e70e85a4a2d93bf71dca137b59e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mkRender(float *fb, int max_x, int max_y) {
//MK: Use ThreadId and BlockId to compute the pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//MK: Skip the computation if the computed pixel position exceeds the FB size
if((i >= max_x) || (j >= max_y)){
return;
}
//MK: Compute the FB pixel value
int pixel_index = j*max_x*3 + i*3;
fb[pixel_index + 0] = float(i) / max_x;
fb[pixel_index + 1] = float(j) / max_y;
fb[pixel_index + 2] = 0.2f;
} | 02509a03dc03e70e85a4a2d93bf71dca137b59e1.cu | #include "includes.h"
__global__ void mkRender(float *fb, int max_x, int max_y) {
//MK: Use ThreadId and BlockId to compute the pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//MK: Skip the computation if the computed pixel position exceeds the FB size
if((i >= max_x) || (j >= max_y)){
return;
}
//MK: Compute the FB pixel value
int pixel_index = j*max_x*3 + i*3;
fb[pixel_index + 0] = float(i) / max_x;
fb[pixel_index + 1] = float(j) / max_y;
fb[pixel_index + 2] = 0.2f;
} |
691b9e8186bd07ff2dc6e99148f1ba675f3d1bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_func.h"
#include <iostream>
#include <cmath>
/*
Inline function to check if image index is in bounds
*/
static __device__ __forceinline__ bool in_img(int x, int y, int rows, int cols)
{
return x >= 0 && x < cols && y >= 0 && y < rows;
}
__global__ void device_add_one(int* d_result, int t)
{
*d_result = t + 1;
}
/*
Dummy function which is used to warm up GPU
*/
int gpuWarmup(int t)
{
int result;
int *d_result;
hipMalloc((void **)&d_result, 1 * sizeof(int));
device_add_one << <1, 1 >> >(d_result, t);
hipMemcpy(&result, d_result, 1 * sizeof(int), hipMemcpyDeviceToHost);
return result;
}
/*
Converts BGR image to grayscale
*/
template<int px_per_thread>
__global__ void bgr_to_grayscale(uchar3 *bgr_ptr, float *gray_ptr, int rows, int cols)
{
// get global index within image
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = px_per_thread * (blockIdx.y * blockDim.y + threadIdx.y);
// loop over the number of pixels each thread is handling
for (size_t i = 0; i < px_per_thread; ++i)
{
// get BGR pixel values
uchar3 p;
if (in_img(x, y + i, rows, cols))
p = bgr_ptr[(y + i) * cols + x];
else
return;
// calculate grayscale value
float g = 0.298839f*(float)p.z + 0.586811f*(float)p.y + 0.114350f*(float)p.x;
// set grayscale value in image
if (in_img(x, y + i, rows, cols))
gray_ptr[(y + i) * cols + x] = (g >= 255.f ? 255.f : g);
}
}
void callGrayscaleKernel(uchar3 *bgr_ptr, float *gray_ptr, int rows, int cols) {
// define num pixels each thread operates on
const int px_per_thread = 4;
// define block and grid sizes
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (cols + block_size.x - 1) / block_size.x;
grid_size.y = (rows + px_per_thread * block_size.y - 1) / (px_per_thread * block_size.y);
// call grayscale conversion kernel
bgr_to_grayscale<px_per_thread> << <grid_size, block_size >> >(bgr_ptr, gray_ptr, rows, cols);
return;
}
template<int px_per_thread>
__global__ void copy_roi(float *src, int src_rows, int src_cols,
float *dst, int x, int y, int width, int height)
{
// get global index within image
const int x_dst = blockIdx.x * blockDim.x + threadIdx.x;
const int y_dst = px_per_thread * (blockIdx.y * blockDim.y + threadIdx.y);
// loop over the number of pixels each thread is handling
for (size_t i = 0; i < px_per_thread; i++)
{
// make sure we're in bounds of both src and dst images
if (in_img(x + x_dst, y + y_dst + i, src_rows, src_cols) && in_img(x_dst, y_dst, height, width))
dst[(y_dst + i) * width + x_dst] = src[(y + y_dst + i) * src_cols + x_dst + x];
}
return;
}
/*
Call kernel to copy section of source image into new image
*/
void callROIKernel(float *src, int src_rows, int src_cols,
float *dst, int x, int y, int width, int height)
{
// define num pixels each thread operates on
const int px_per_thread = 4;
// define block and grid sizes
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (width + block_size.x - 1) / block_size.x;
grid_size.y = (height + px_per_thread * block_size.y - 1) / (px_per_thread * block_size.y);
// call ROI copy kernel
copy_roi<px_per_thread> << <grid_size, block_size >> >(src, src_rows, src_cols, dst, x, y, width, height);
return;
}
__global__ void sqrd_diff_normed(float *tm, int tm_rows, int tm_cols,
float *src, int src_rows, int src_cols,
float *output)
{
// get global index within output
const int x = blockIdx.x * blockDim.x + threadIdx.x; // global column
const int y = blockIdx.y * blockDim.y + threadIdx.y; // global row
// ensure in bounds
uint output_rows = src_rows - tm_rows + 1;
uint output_cols = src_cols - tm_cols + 1;
if (!in_img(x, y, output_rows, output_cols)) return;
float sqrd_diff = 0;
float tm_sqrd_sum = 0;
float src_sqrd_sum = 0;
// iterate over all template image pixels and correspondences
for (uint i = 0; i < tm_rows; i++) {
for (uint j = 0; j < tm_cols; j++) {
float tm_val = tm[i * tm_cols + j];
float src_val = src[(y + i) * src_cols + x + j];
// increment squared difference
float diff = tm_val - src_val;
sqrd_diff += diff * diff;
// increment squared sum
tm_sqrd_sum += tm_val * tm_val;
src_sqrd_sum += src_val * src_val;
}
}
output[y * output_cols + x] = 255.f * sqrd_diff / sqrt(tm_sqrd_sum * src_sqrd_sum);
return;
}
/*
Perform template matching by calling kernel
*/
void callTemplateMatchKernel(float *tm, int tm_rows, int tm_cols, // template info
float *src, int src_rows, int src_cols, // source info
float *output) // output
{
uint output_rows = src_rows - tm_rows + 1;
uint output_cols = src_cols - tm_cols + 1;
// each thread computes one pixel in the result
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (output_cols + block_size.x - 1) / block_size.x;
grid_size.y = (output_rows + block_size.y - 1) / block_size.y;
sqrd_diff_normed << <grid_size, block_size >> >(tm, tm_rows, tm_cols, src, src_rows, src_cols, output);
return;
}
__global__ void find_min_loc(float *src, int rows, int cols, uint * min_loc) {
float min_val = 256.f;
for (uint i = 0; i < rows; i++) {
for (uint j = 0; j < cols; j++) {
float im_val = src[i * cols + j];
if (im_val < min_val) {
min_val = im_val;
min_loc[0] = j; // col
min_loc[1] = i; // row
}
}
}
return;
}
/*
Slowly and naively determines location of minimum value in device image
*/
void callMinLocKernel(float *src, int rows, int cols, uint * min_loc) {
find_min_loc << <1, 1 >> >(src, rows, cols, min_loc);
return;
}
| 691b9e8186bd07ff2dc6e99148f1ba675f3d1bfe.cu | #include "gpu_func.h"
#include <iostream>
#include <cmath>
/*
Inline function to check if image index is in bounds
*/
static __device__ __forceinline__ bool in_img(int x, int y, int rows, int cols)
{
return x >= 0 && x < cols && y >= 0 && y < rows;
}
__global__ void device_add_one(int* d_result, int t)
{
*d_result = t + 1;
}
/*
Dummy function which is used to warm up GPU
*/
int gpuWarmup(int t)
{
int result;
int *d_result;
cudaMalloc((void **)&d_result, 1 * sizeof(int));
device_add_one << <1, 1 >> >(d_result, t);
cudaMemcpy(&result, d_result, 1 * sizeof(int), cudaMemcpyDeviceToHost);
return result;
}
/*
Converts BGR image to grayscale
*/
template<int px_per_thread>
__global__ void bgr_to_grayscale(uchar3 *bgr_ptr, float *gray_ptr, int rows, int cols)
{
// get global index within image
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = px_per_thread * (blockIdx.y * blockDim.y + threadIdx.y);
// loop over the number of pixels each thread is handling
for (size_t i = 0; i < px_per_thread; ++i)
{
// get BGR pixel values
uchar3 p;
if (in_img(x, y + i, rows, cols))
p = bgr_ptr[(y + i) * cols + x];
else
return;
// calculate grayscale value
float g = 0.298839f*(float)p.z + 0.586811f*(float)p.y + 0.114350f*(float)p.x;
// set grayscale value in image
if (in_img(x, y + i, rows, cols))
gray_ptr[(y + i) * cols + x] = (g >= 255.f ? 255.f : g);
}
}
void callGrayscaleKernel(uchar3 *bgr_ptr, float *gray_ptr, int rows, int cols) {
// define num pixels each thread operates on
const int px_per_thread = 4;
// define block and grid sizes
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (cols + block_size.x - 1) / block_size.x;
grid_size.y = (rows + px_per_thread * block_size.y - 1) / (px_per_thread * block_size.y);
// call grayscale conversion kernel
bgr_to_grayscale<px_per_thread> << <grid_size, block_size >> >(bgr_ptr, gray_ptr, rows, cols);
return;
}
template<int px_per_thread>
__global__ void copy_roi(float *src, int src_rows, int src_cols,
float *dst, int x, int y, int width, int height)
{
// get global index within image
const int x_dst = blockIdx.x * blockDim.x + threadIdx.x;
const int y_dst = px_per_thread * (blockIdx.y * blockDim.y + threadIdx.y);
// loop over the number of pixels each thread is handling
for (size_t i = 0; i < px_per_thread; i++)
{
// make sure we're in bounds of both src and dst images
if (in_img(x + x_dst, y + y_dst + i, src_rows, src_cols) && in_img(x_dst, y_dst, height, width))
dst[(y_dst + i) * width + x_dst] = src[(y + y_dst + i) * src_cols + x_dst + x];
}
return;
}
/*
Call kernel to copy section of source image into new image
*/
void callROIKernel(float *src, int src_rows, int src_cols,
float *dst, int x, int y, int width, int height)
{
// define num pixels each thread operates on
const int px_per_thread = 4;
// define block and grid sizes
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (width + block_size.x - 1) / block_size.x;
grid_size.y = (height + px_per_thread * block_size.y - 1) / (px_per_thread * block_size.y);
// call ROI copy kernel
copy_roi<px_per_thread> << <grid_size, block_size >> >(src, src_rows, src_cols, dst, x, y, width, height);
return;
}
__global__ void sqrd_diff_normed(float *tm, int tm_rows, int tm_cols,
float *src, int src_rows, int src_cols,
float *output)
{
// get global index within output
const int x = blockIdx.x * blockDim.x + threadIdx.x; // global column
const int y = blockIdx.y * blockDim.y + threadIdx.y; // global row
// ensure in bounds
uint output_rows = src_rows - tm_rows + 1;
uint output_cols = src_cols - tm_cols + 1;
if (!in_img(x, y, output_rows, output_cols)) return;
float sqrd_diff = 0;
float tm_sqrd_sum = 0;
float src_sqrd_sum = 0;
// iterate over all template image pixels and correspondences
for (uint i = 0; i < tm_rows; i++) {
for (uint j = 0; j < tm_cols; j++) {
float tm_val = tm[i * tm_cols + j];
float src_val = src[(y + i) * src_cols + x + j];
// increment squared difference
float diff = tm_val - src_val;
sqrd_diff += diff * diff;
// increment squared sum
tm_sqrd_sum += tm_val * tm_val;
src_sqrd_sum += src_val * src_val;
}
}
output[y * output_cols + x] = 255.f * sqrd_diff / sqrt(tm_sqrd_sum * src_sqrd_sum);
return;
}
/*
Perform template matching by calling kernel
*/
void callTemplateMatchKernel(float *tm, int tm_rows, int tm_cols, // template info
float *src, int src_rows, int src_cols, // source info
float *output) // output
{
uint output_rows = src_rows - tm_rows + 1;
uint output_cols = src_cols - tm_cols + 1;
// each thread computes one pixel in the result
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (output_cols + block_size.x - 1) / block_size.x;
grid_size.y = (output_rows + block_size.y - 1) / block_size.y;
sqrd_diff_normed << <grid_size, block_size >> >(tm, tm_rows, tm_cols, src, src_rows, src_cols, output);
return;
}
__global__ void find_min_loc(float *src, int rows, int cols, uint * min_loc) {
float min_val = 256.f;
for (uint i = 0; i < rows; i++) {
for (uint j = 0; j < cols; j++) {
float im_val = src[i * cols + j];
if (im_val < min_val) {
min_val = im_val;
min_loc[0] = j; // col
min_loc[1] = i; // row
}
}
}
return;
}
/*
Slowly and naively determines location of minimum value in device image
*/
void callMinLocKernel(float *src, int rows, int cols, uint * min_loc) {
find_min_loc << <1, 1 >> >(src, rows, cols, min_loc);
return;
}
|
140302b53185aa8deba0efe1ea36596cbf304ad9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
//#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <dirent.h>
#include <unistd.h>
#include <string.h> // for strlen/strcmp/strtok/strcpy used below
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define MAX_STR_LEN 256
struct ponto_capturado{
int TID;
char *clazz;
int time;
double lat, lon;
int gid;
int stopId;
};
struct trajetoria{
ponto_capturado** pontos;
int qntdPontos;
};
__device__ double euclidean(double *p1, double *p2);
__global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors);
trajetoria** trajetorias;
trajetoria* readTrajFile(char*);
double* trajectoryRawer(trajetoria*);
double distance(double*, int, double*, int);
int main(int argc, char *argv[]) {
int file_count = 0;
int len;
DIR * dirp;
struct dirent * entry;
dirp = opendir("./trajetorias");
while ((entry = readdir(dirp)) != NULL) {
len = strlen (entry->d_name);
if (entry->d_type == DT_REG && strcmp (".traj", &(entry->d_name[len - 5])) == 0) { /* If the entry is a regular file */
file_count++;
}
}
closedir(dirp);
trajetorias = (trajetoria**) malloc(file_count*sizeof(trajetoria*));
DIR* FD;
struct dirent* in_file;
if (NULL == (FD = opendir ("./trajetorias"))) {
fprintf(stderr, "Error : Failed to open input directory\n");
return 1;
}
int fileCounter = 0;
while ((in_file = readdir(FD))) {
len = strlen (in_file->d_name);
if (len > 4 && in_file->d_type == DT_REG && strcmp (".traj", &(in_file->d_name[len - 5])) == 0) {
if (!strcmp (in_file->d_name, "."))
continue;
if (!strcmp (in_file->d_name, ".."))
continue;
char filePath[1024];
sprintf( filePath, "%s/%s", "./trajetorias", in_file->d_name );
trajetorias[fileCounter++] = readTrajFile(filePath);
}
}
printf("Qntd arquivos lidos %d\n", file_count);
double** allDistances = (double**) malloc(file_count*sizeof(double*));
double** rawTrajs = (double**) malloc(file_count*sizeof(double*));
for(int k = 0;k<file_count;k++) {
rawTrajs[k] = trajectoryRawer(trajetorias[k]);
}
for(int k = 0;k<file_count;k++) {
allDistances[k] = (double*) malloc(file_count*sizeof(double));
}
printf("Trajetorias transformadas %d\n", file_count);
for(int k = 0;k<file_count;k++) {
allDistances[k][k] = 0.0;
for(int l = 0;l<file_count;l++) {
//printf("Distance lengthA=%d, lengthB=%d\n", trajetorias[k]->qntdPontos, trajetorias[l]->qntdPontos);
if(k<l) {
double *trajA = rawTrajs[k];
double *trajB = rawTrajs[l];
double similarity = distance(trajA, trajetorias[k]->qntdPontos, trajB, trajetorias[l]->qntdPontos);
allDistances[k][l] = similarity;
allDistances[l][k] = similarity;
//printf("Similaridade das trajetrias: %.2f\n", similarity);
}
}
}
for(int i = 0; i < file_count;i++) {
if(trajetorias[i]) {
for(int j = 0; j < trajetorias[i]->qntdPontos;j++) {
free(trajetorias[i]->pontos[j]);
}
free(trajetorias[i]);
}
}
free(trajetorias);
return 0;
}
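/*
Host-side wrapper: copies two raw trajectories to the device, launches the
msm kernel, then reduces the per-point scores into a single similarity value
(parityAB + parityBA) / (N + M).
*/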
double distance(double* trajA, int N, double* trajB, int M) {
double* aScore = (double*)malloc( N*sizeof(double));
double* bScore = (double*)malloc( N*M*sizeof(double));
double* semanticsDescriptors = (double*)malloc( 2*2*sizeof(double));
//GEO
semanticsDescriptors[0] = 0.0;
semanticsDescriptors[1] = 0.5;
//TIME
semanticsDescriptors[2] = 0.0;
semanticsDescriptors[3] = 0.5;
//printf("Distance lengthA=%d, lengthB=%d\n", N,M);
double *d_trajA,*d_trajB, *d_aScore, *d_bScore, *d_semanticsDescriptors;
hipMalloc( (void**) &d_trajA, 4*N*sizeof(double) );
hipMalloc( (void**) &d_trajB, 4*M*sizeof(double) );
hipMalloc( (void**) &d_semanticsDescriptors, 2*2*sizeof(double) );
hipMalloc( (void**) &d_aScore, N*sizeof(double) );
hipMalloc( (void**) &d_bScore, N*M*sizeof(double) );
hipMemcpy( (void*) d_trajA, (void*) trajA, 4*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( (void*) d_trajB, (void*) trajB, 4*M*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( (void*) d_semanticsDescriptors, (void*) semanticsDescriptors, 2*2*sizeof(double), hipMemcpyHostToDevice);
int THREADS = 512;
int BLOCOS = (N/THREADS) + 1;
struct timeval begin, end;
gettimeofday(&begin, NULL);
hipLaunchKernelGGL(( msm), dim3(BLOCOS), dim3(THREADS), 0, 0, d_trajA, N, d_trajB, M, d_aScore, d_bScore, d_semanticsDescriptors );
gettimeofday(&end, NULL);
hipMemcpy( (void*) aScore, (void*) d_aScore, N*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy( (void*) bScore, (void*) d_bScore, N*M*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_trajA);
hipFree(d_trajB);
hipFree(d_aScore);
hipFree(d_bScore);
hipFree(d_semanticsDescriptors);
double parityAB = 0.0;
for (int i = 0; i < N; i++) {
parityAB += aScore[i];
}
double parityBA = 0.0;
for (int i = 0; i < N; i++) {
double maxScore = 0.0;
for (int j = 0; j < M; j++) {
maxScore = MAX(maxScore, bScore[i * M + j]);
}
parityBA += maxScore;
}
//printf("parityAB=%.2f, parityBA=%.2f\n", parityAB, parityBA );
double similarity = (parityAB + parityBA) / (N + M);
free(semanticsDescriptors);
//printf("similarity=%.2f\n", similarity );
free(bScore);
free(aScore);
aScore = NULL;
bScore = NULL;
semanticsDescriptors = NULL;
return similarity;
}
trajetoria* readTrajFile(char *filePath) {
/* FileStream for the Library File */
FILE *trajFile;
/* allocation of the buffer for every line in the File */
char *buf = (char*) malloc(MAX_STR_LEN);
char *tmp;
/* if the space could not be allocated, return an error */
if (buf == NULL) {
printf ("No memory\n");
return NULL;
}
if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) //Reading a file
{
printf( "File could not be opened: %s.\n", filePath );
return NULL;
}
int pointsCounter = 0;
while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) {
pointsCounter++;
}
fclose(trajFile);
ponto_capturado **traj = (ponto_capturado**) malloc(pointsCounter*sizeof(ponto_capturado*));
trajetoria* trajetoria = new struct trajetoria;
trajetoria->pontos = traj;
trajetoria->qntdPontos = pointsCounter;
if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) {
printf( "File could not be opened: %s.\n", filePath );
return NULL;
}
int i = 0;
while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL)
{
if (strlen(buf)>0) {
if(buf[strlen (buf) - 1] == '\n')
buf[strlen (buf) - 1] = '\0';
} else {
if(buf[0] == '\n') {
continue;
}
}
tmp = strtok(buf, ";");
traj[i] = new ponto_capturado();
traj[i]->TID = atoi(tmp);
tmp = strtok(NULL, ";");
int len = strlen(tmp);
traj[i]->clazz = (char*)malloc(len + 1);
strcpy(traj[i]->clazz, tmp);
tmp = strtok(NULL, ";");
traj[i]->time = atoi(tmp);
tmp = strtok(NULL, ";");
traj[i]->lat = atof(tmp);
tmp = strtok(NULL, ";");
traj[i]->lon = atof(tmp);
tmp = strtok(NULL, ";");
traj[i]->gid = atoi(tmp);
tmp = strtok(NULL, ";");
if ((tmp != NULL) && (tmp[0] == '\0')) {
traj[i]->stopId = atoi(tmp);
} else {
traj[i]->stopId = 0;
}
i++;
}
//printf("Loaded %s - %d points\n", filePath, i);
fclose(trajFile);
return trajetoria;
}
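/*
Flattens a trajectory into a packed array of 4 doubles per point:
latitude, longitude, start time and end time (time + 30).
*/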
double* trajectoryRawer(trajetoria* trajetoria) {
int N = trajetoria->qntdPontos;
double* trajA = (double*)malloc( 4*N*sizeof(double));
for(int i = 0; i < N; i++) {
trajA[i * 4] = trajetoria->pontos[i]->lat;
trajA[i * 4 + 1] = trajetoria->pontos[i]->lon;
trajA[i * 4 + 2] = trajetoria->pontos[i]->time;
trajA[i * 4 + 3] = trajetoria->pontos[i]->time + 30;
}
return trajA;
}
//extern "C"
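/*
MSM kernel: each thread takes one point of trajectory A and compares it against
every point of trajectory B, scoring spatial (euclidean) and temporal (interval
overlap) proximity against the thresholds and weights in semanticsDescriptors;
per-pair scores go to bScore and the best match per A-point goes to aScore.
*/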
__global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i>=lengthA) {
return;
}
double latGeoA = trajA[i * 4];
double lonGeoA = trajA[i * 4 + 1];
double startTimeA = trajA[i * 4 + 2];
double endTimeA = trajA[i * 4 + 3];
double geoThreshold = semanticsDescriptors[0];
double timeThreshold = semanticsDescriptors[2];
double geoWeight = semanticsDescriptors[1];
double timeWeight = semanticsDescriptors[3];
double maxScore = 0.0;
for (int j = 0; j < lengthB; j++) {
double latGeoB = trajB[j * 4];
double lonGeoB = trajB[j * 4 + 1];
double startTimeB = trajB[j * 4 + 2];
double endTimeB = trajB[j * 4 + 3];
double timeScore = 0.0;
if(startTimeA < endTimeB && startTimeB < endTimeA ) {
double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB);
if(overlap > 0.0) {
double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB);
double timeDistance = 1 - (overlap / duration);
timeScore = (timeDistance <= timeThreshold ? 1 : 0) * timeWeight;
}
}
double geoB[] = {latGeoB, lonGeoB};
double geoA[] = {latGeoA, lonGeoA};
double geoScore = (euclidean(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight;
double sumScore = timeScore + geoScore;
if(sumScore > maxScore) {
maxScore = sumScore;
}
bScore[i * lengthB + j] = sumScore;
}
aScore[i] = maxScore;
//printf("Thread %d, maxScore=%.2f, maxGeoScore=%.2f, maxTimeScore=%.2f\n", i, maxScore, maxGeoScore,maxTimeScore );
}
__device__ double euclidean(double *p1, double *p2)
{
double distX = abs(p1[0] - p2[0]);
double distXSquare = distX * distX;
double distY = abs(p1[1] - p2[1]);
double distYSquare = distY * distY;
return sqrt(distXSquare + distYSquare);
}
| 140302b53185aa8deba0efe1ea36596cbf304ad9.cu | #include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
//#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <dirent.h>
#include <unistd.h>
#include <string.h> // for strlen/strcmp/strtok/strcpy used below
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define MAX_STR_LEN 256
struct ponto_capturado{
int TID;
char *clazz;
int time;
double lat, lon;
int gid;
int stopId;
};
struct trajetoria{
ponto_capturado** pontos;
int qntdPontos;
};
__device__ double euclidean(double *p1, double *p2);
__global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors);
trajetoria** trajetorias;
trajetoria* readTrajFile(char*);
double* trajectoryRawer(trajetoria*);
double distance(double*, int, double*, int);
int main(int argc, char *argv[]) {
int file_count = 0;
int len;
DIR * dirp;
struct dirent * entry;
dirp = opendir("./trajetorias");
while ((entry = readdir(dirp)) != NULL) {
len = strlen (entry->d_name);
if (entry->d_type == DT_REG && strcmp (".traj", &(entry->d_name[len - 5])) == 0) { /* If the entry is a regular file */
file_count++;
}
}
closedir(dirp);
trajetorias = (trajetoria**) malloc(file_count*sizeof(trajetoria*));
DIR* FD;
struct dirent* in_file;
if (NULL == (FD = opendir ("./trajetorias"))) {
fprintf(stderr, "Error : Failed to open input directory\n");
return 1;
}
int fileCounter = 0;
while ((in_file = readdir(FD))) {
len = strlen (in_file->d_name);
if (len > 4 && in_file->d_type == DT_REG && strcmp (".traj", &(in_file->d_name[len - 5])) == 0) {
if (!strcmp (in_file->d_name, "."))
continue;
if (!strcmp (in_file->d_name, ".."))
continue;
char filePath[1024];
sprintf( filePath, "%s/%s", "./trajetorias", in_file->d_name );
trajetorias[fileCounter++] = readTrajFile(filePath);
}
}
printf("Qntd arquivos lidos %d\n", file_count);
double** allDistances = (double**) malloc(file_count*sizeof(double*));
double** rawTrajs = (double**) malloc(file_count*sizeof(double*));
for(int k = 0;k<file_count;k++) {
rawTrajs[k] = trajectoryRawer(trajetorias[k]);
}
for(int k = 0;k<file_count;k++) {
allDistances[k] = (double*) malloc(file_count*sizeof(double));
}
printf("Trajetorias transformadas %d\n", file_count);
for(int k = 0;k<file_count;k++) {
allDistances[k][k] = 0.0;
for(int l = 0;l<file_count;l++) {
//printf("Distance lengthA=%d, lengthB=%d\n", trajetorias[k]->qntdPontos, trajetorias[l]->qntdPontos);
if(k<l) {
double *trajA = rawTrajs[k];
double *trajB = rawTrajs[l];
double similarity = distance(trajA, trajetorias[k]->qntdPontos, trajB, trajetorias[l]->qntdPontos);
allDistances[k][l] = similarity;
allDistances[l][k] = similarity;
//printf("Similaridade das trajetórias: %.2f\n", similarity);
}
}
}
for(int i = 0; i < file_count;i++) {
if(trajetorias[i]) {
for(int j = 0; j < trajetorias[i]->qntdPontos;j++) {
free(trajetorias[i]->pontos[j]);
}
free(trajetorias[i]);
}
}
free(trajetorias);
return 0;
}
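/*
Host-side wrapper: copies two raw trajectories to the device, launches the
msm kernel, then reduces the per-point scores into a single similarity value
(parityAB + parityBA) / (N + M).
*/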
double distance(double* trajA, int N, double* trajB, int M) {
double* aScore = (double*)malloc( N*sizeof(double));
double* bScore = (double*)malloc( N*M*sizeof(double));
double* semanticsDescriptors = (double*)malloc( 2*2*sizeof(double));
//GEO
semanticsDescriptors[0] = 0.0;
semanticsDescriptors[1] = 0.5;
//TIME
semanticsDescriptors[2] = 0.0;
semanticsDescriptors[3] = 0.5;
//printf("Distance lengthA=%d, lengthB=%d\n", N,M);
double *d_trajA,*d_trajB, *d_aScore, *d_bScore, *d_semanticsDescriptors;
cudaMalloc( (void**) &d_trajA, 4*N*sizeof(double) );
cudaMalloc( (void**) &d_trajB, 4*M*sizeof(double) );
cudaMalloc( (void**) &d_semanticsDescriptors, 2*2*sizeof(double) );
cudaMalloc( (void**) &d_aScore, N*sizeof(double) );
cudaMalloc( (void**) &d_bScore, N*M*sizeof(double) );
cudaMemcpy( (void*) d_trajA, (void*) trajA, 4*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( (void*) d_trajB, (void*) trajB, 4*M*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( (void*) d_semanticsDescriptors, (void*) semanticsDescriptors, 2*2*sizeof(double), cudaMemcpyHostToDevice);
int THREADS = 512;
int BLOCOS = (N/THREADS) + 1;
struct timeval begin, end;
gettimeofday(&begin, NULL);
msm<<<BLOCOS, THREADS>>>( d_trajA, N, d_trajB, M, d_aScore, d_bScore, d_semanticsDescriptors );
gettimeofday(&end, NULL);
cudaMemcpy( (void*) aScore, (void*) d_aScore, N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy( (void*) bScore, (void*) d_bScore, N*M*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_trajA);
cudaFree(d_trajB);
cudaFree(d_aScore);
cudaFree(d_bScore);
cudaFree(d_semanticsDescriptors);
double parityAB = 0.0;
for (int i = 0; i < N; i++) {
parityAB += aScore[i];
}
double parityBA = 0.0;
for (int i = 0; i < N; i++) {
double maxScore = 0.0;
for (int j = 0; j < M; j++) {
maxScore = MAX(maxScore, bScore[i * M + j]);
}
parityBA += maxScore;
}
//printf("parityAB=%.2f, parityBA=%.2f\n", parityAB, parityBA );
double similarity = (parityAB + parityBA) / (N + M);
free(semanticsDescriptors);
//printf("similarity=%.2f\n", similarity );
free(bScore);
free(aScore);
aScore = NULL;
bScore = NULL;
semanticsDescriptors = NULL;
return similarity;
}
trajetoria* readTrajFile(char *filePath) {
/* FileStream for the Library File */
FILE *trajFile;
/* allocation of the buffer for every line in the File */
char *buf = (char*) malloc(MAX_STR_LEN);
char *tmp;
/* if the space could not be allocated, return an error */
if (buf == NULL) {
printf ("No memory\n");
return NULL;
}
if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) //Reading a file
{
printf( "File could not be opened: %s.\n", filePath );
return NULL;
}
int pointsCounter = 0;
while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) {
pointsCounter++;
}
fclose(trajFile);
ponto_capturado **traj = (ponto_capturado**) malloc(pointsCounter*sizeof(ponto_capturado*));
trajetoria* trajetoria = new struct trajetoria;
trajetoria->pontos = traj;
trajetoria->qntdPontos = pointsCounter;
if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) {
printf( "File could not be opened: %s.\n", filePath );
return NULL;
}
int i = 0;
while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL)
{
if (strlen(buf)>0) {
if(buf[strlen (buf) - 1] == '\n')
buf[strlen (buf) - 1] = '\0';
} else {
if(buf[0] == '\n') {
continue;
}
}
tmp = strtok(buf, ";");
traj[i] = new ponto_capturado();
traj[i]->TID = atoi(tmp);
tmp = strtok(NULL, ";");
int len = strlen(tmp);
traj[i]->clazz = (char*)malloc(len + 1);
strcpy(traj[i]->clazz, tmp);
tmp = strtok(NULL, ";");
traj[i]->time = atoi(tmp);
tmp = strtok(NULL, ";");
traj[i]->lat = atof(tmp);
tmp = strtok(NULL, ";");
traj[i]->lon = atof(tmp);
tmp = strtok(NULL, ";");
traj[i]->gid = atoi(tmp);
tmp = strtok(NULL, ";");
if ((tmp != NULL) && (tmp[0] == '\0')) {
traj[i]->stopId = atoi(tmp);
} else {
traj[i]->stopId = 0;
}
i++;
}
//printf("Loaded %s - %d points\n", filePath, i);
fclose(trajFile);
return trajetoria;
}
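/*
Flattens a trajectory into a packed array of 4 doubles per point:
latitude, longitude, start time and end time (time + 30).
*/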
double* trajectoryRawer(trajetoria* trajetoria) {
int N = trajetoria->qntdPontos;
double* trajA = (double*)malloc( 4*N*sizeof(double));
for(int i = 0; i < N; i++) {
trajA[i * 4] = trajetoria->pontos[i]->lat;
trajA[i * 4 + 1] = trajetoria->pontos[i]->lon;
trajA[i * 4 + 2] = trajetoria->pontos[i]->time;
trajA[i * 4 + 3] = trajetoria->pontos[i]->time + 30;
}
return trajA;
}
//extern "C"
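/*
MSM kernel: each thread takes one point of trajectory A and compares it against
every point of trajectory B, scoring spatial (euclidean) and temporal (interval
overlap) proximity against the thresholds and weights in semanticsDescriptors;
per-pair scores go to bScore and the best match per A-point goes to aScore.
*/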
__global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i>=lengthA) {
return;
}
double latGeoA = trajA[i * 4];
double lonGeoA = trajA[i * 4 + 1];
double startTimeA = trajA[i * 4 + 2];
double endTimeA = trajA[i * 4 + 3];
double geoThreshold = semanticsDescriptors[0];
double timeThreshold = semanticsDescriptors[2];
double geoWeight = semanticsDescriptors[1];
double timeWeight = semanticsDescriptors[3];
double maxScore = 0.0;
for (int j = 0; j < lengthB; j++) {
double latGeoB = trajB[j * 4];
double lonGeoB = trajB[j * 4 + 1];
double startTimeB = trajB[j * 4 + 2];
double endTimeB = trajB[j * 4 + 3];
double timeScore = 0.0;
if(startTimeA < endTimeB && startTimeB < endTimeA ) {
double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB);
if(overlap > 0.0) {
double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB);
double timeDistance = 1 - (overlap / duration);
timeScore = (timeDistance <= timeThreshold ? 1 : 0) * timeWeight;
}
}
double geoB[] = {latGeoB, lonGeoB};
double geoA[] = {latGeoA, lonGeoA};
double geoScore = (euclidean(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight;
double sumScore = timeScore + geoScore;
if(sumScore > maxScore) {
maxScore = sumScore;
}
bScore[i * lengthB + j] = sumScore;
}
aScore[i] = maxScore;
//printf("Thread %d, maxScore=%.2f, maxGeoScore=%.2f, maxTimeScore=%.2f\n", i, maxScore, maxGeoScore,maxTimeScore );
}
__device__ double euclidean(double *p1, double *p2)
{
double distX = abs(p1[0] - p2[0]);
double distXSquare = distX * distX;
double distY = abs(p1[1] - p2[1]);
double distYSquare = distY * distY;
return sqrt(distXSquare + distYSquare);
}
|
b3bc7196e32e42e4dd3e82f7c74069b909b2661a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/maxpool.h>
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <type_traits>
#include <utils/spconv/tensorview/helper_kernel.cuh>
#include "pytorch_cuda_helper.hpp"
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
int ILPStrideY[NumILP];
Index idxo, idxi;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in > out) {
outFeatures[idxo] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut,
int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolFwdVecBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] > bufo[i]) {
bufo[i] = bufi[i];
}
}
reinterpret_cast<VecType *>(outFeatures)[idxo] =
reinterpret_cast<VecType *>(bufo)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
Index idxo, idxi;
int ILPStrideY[NumILP];
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
fout += blockIdx.y * NumTLP;
fin += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in == out) {
fin[idxi] += fout[idxo];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericBlockKernel(
const scalar_t *outFeatures, const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin, const Index *indicesIn,
const Index *indicesOut, int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolBwdVecBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
scalar_t bufdi[vecloadFactor];
scalar_t bufdo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<const VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
reinterpret_cast<VecType *>(bufdo)[0] =
reinterpret_cast<const VecType *>(fout)[idxo];
reinterpret_cast<VecType *>(bufdi)[0] =
reinterpret_cast<VecType *>(fin)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] == bufo[i]) {
bufdi[i] += bufdo[i];
}
}
reinterpret_cast<VecType *>(fin)[idxi] =
reinterpret_cast<VecType *>(bufdi)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
}
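// The functors below try tile sizes NumTLP in {64, 32, 16}: when numPlanes is a
// multiple of NumTLP they launch the vectorized block kernel plus a generic kernel
// for the remaining rows, otherwise they fall back to the generic block/tail kernels.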
namespace functor {
template <typename scalar_t, typename Index>
struct SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolFwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(::min(size / NumTLP, 512), numPlanes / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), outFeatures.data(), inFeatures.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolFwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, d.getStream(), outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolFwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolFwdGenericKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(1, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
template <typename scalar_t, typename Index>
struct SparseMaxPoolBackwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d,
tv::TensorView<const scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const scalar_t> fout,
tv::TensorView<scalar_t> fin,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &fout, &fin,
&indices, ¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolBwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(::min(size / NumTLP, 512), numPlanes / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolBwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, d.getStream(), outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolBwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolBwdGenericKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(1, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(scalar_t, Index) \
template struct functor::SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, \
Index>; \
template struct functor::SparseMaxPoolBackwardFunctor<tv::TorchGPU, \
scalar_t, Index>;
#define DECLARE_GPU_SPECS(scalar_t) DECLARE_GPU_SPECS_T_INDEX(scalar_t, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
| b3bc7196e32e42e4dd3e82f7c74069b909b2661a.cu | // Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/maxpool.h>
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <type_traits>
#include <utils/spconv/tensorview/helper_kernel.cuh>
#include "pytorch_cuda_helper.hpp"
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
int ILPStrideY[NumILP];
Index idxo, idxi;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in > out) {
outFeatures[idxo] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut,
int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolFwdVecBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] > bufo[i]) {
bufo[i] = bufi[i];
}
}
reinterpret_cast<VecType *>(outFeatures)[idxo] =
reinterpret_cast<VecType *>(bufo)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
Index idxo, idxi;
int ILPStrideY[NumILP];
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
fout += blockIdx.y * NumTLP;
fin += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in == out) {
fin[idxi] += fout[idxo];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericBlockKernel(
const scalar_t *outFeatures, const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin, const Index *indicesIn,
const Index *indicesOut, int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolBwdVecBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
scalar_t bufdi[vecloadFactor];
scalar_t bufdo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<const VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
reinterpret_cast<VecType *>(bufdo)[0] =
reinterpret_cast<const VecType *>(fout)[idxo];
reinterpret_cast<VecType *>(bufdi)[0] =
reinterpret_cast<VecType *>(fin)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] == bufo[i]) {
bufdi[i] += bufdo[i];
}
}
reinterpret_cast<VecType *>(fin)[idxi] =
reinterpret_cast<VecType *>(bufdi)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
}
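// The functors below try tile sizes NumTLP in {64, 32, 16}: when numPlanes is a
// multiple of NumTLP they launch the vectorized block kernel plus a generic kernel
// for the remaining rows, otherwise they fall back to the generic block/tail kernels.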
namespace functor {
template <typename scalar_t, typename Index>
struct SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
maxPoolFwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(std::min(size / NumTLP, 512), numPlanes / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(outFeatures.data(), inFeatures.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolFwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, d.getStream()>>>(outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
maxPoolFwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolFwdGenericKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(1, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
template <typename scalar_t, typename Index>
struct SparseMaxPoolBackwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d,
tv::TensorView<const scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const scalar_t> fout,
tv::TensorView<scalar_t> fin,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &fout, &fin,
                                 &indices, &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
maxPoolBwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(std::min(size / NumTLP, 512), numPlanes / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolBwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, d.getStream()>>>(outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
maxPoolBwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolBwdGenericKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(1, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(scalar_t, Index) \
template struct functor::SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, \
Index>; \
template struct functor::SparseMaxPoolBackwardFunctor<tv::TorchGPU, \
scalar_t, Index>;
#define DECLARE_GPU_SPECS(scalar_t) DECLARE_GPU_SPECS_T_INDEX(scalar_t, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
|
91f296ca1304e3ddd3da59da4db583ac95289a31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Babak Poursartip
05/14/2020
*/
#include <stdio.h>
__global__
void hello(){
printf("I am: %d %d \n",threadIdx.x, blockIdx.x);
}
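// hipify rewrites the CUDA triple-chevron launch `hello<<<blocks,threads>>>()`
// (see the paired .cu file) into hipLaunchKernelGGL(kernel, gridDim, blockDim,
// sharedMemBytes, stream, args...); the two zeros below request 0 bytes of
// dynamic shared memory and the default stream.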
int main(){
printf(" code start ... \n");
dim3 blocks(16,1,1);
dim3 threads(1,1,1);
  hipLaunchKernelGGL((hello), dim3(blocks),dim3(threads), 0, 0, );
hipDeviceSynchronize();
printf(" end here. \n");
return 0;
}
| 91f296ca1304e3ddd3da59da4db583ac95289a31.cu |
/*
Babak Poursartip
05/14/2020
*/
#include <stdio.h>
__global__
void hello(){
printf("I am: %d %d \n",threadIdx.x, blockIdx.x);
}
int main(){
printf(" code start ... \n");
dim3 blocks(16,1,1);
dim3 threads(1,1,1);
hello<<<blocks,threads>>>();
cudaDeviceSynchronize();
printf(" end here. \n");
return 0;
}
|
37b664f88d844e26de95f346e50e8ad92258e84c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.3
 * copyright (c) 2019, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: October 2019
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <thrust/device_ptr.h>
//#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <stdexcept>
#include "gpu_tensor.h"
#include "gpu_kernels.h"
#include "gpu_hw.h"
#include "../../tensor/tensor.h"
#include "../../descriptors/descriptors.h"
void gpu_reduce(Tensor *A, Tensor *B,string mode,int* map)
{
int device=A->gpu_device;
int *gmap;
hipSetDevice(device);
int s=A->size/B->size;
check_cuda(hipMalloc((void**)&(gmap),A->size*sizeof(int)),"create map");
check_cuda(hipDeviceSynchronize(), "create");
check_cuda(hipMemcpy(gmap,map,A->size*sizeof(int),hipMemcpyHostToDevice),"copy map");
check_cuda(hipDeviceSynchronize(), "copy");
if (mode=="mean") {
B->fill_(0.0);
setDims(A);
hipLaunchKernelGGL(( reduce_mean), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
B->div_(s);
}
else if (mode=="variance") {
Tensor *C=new Tensor(B->getShape(),B->device);
C->fill_(0.0);
setDims(A);
hipLaunchKernelGGL(( reduce_mean), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,C->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
C->div_(s);
gpu_reduce_op(A,C,"diff",map);
A->sqr_();
B->fill_(0.0);
hipLaunchKernelGGL(( reduce_mean), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean_var");
B->div_(s);
delete C;
}
else {
throw std::invalid_argument("mode: " + mode + " not yet implemented");
}
check_cuda(hipFree(gmap),"delete_map");
}
void gpu_reduce(Tensor *A, Tensor *B,string mode,MapReduceDescriptor *MD)
{
int device=A->gpu_device;
hipSetDevice(device);
int s=A->size/B->size;
if (MD->gind==nullptr) {
check_cuda(hipMalloc((void**)&(MD->gind),A->size*sizeof(int)),"create map");
check_cuda(hipDeviceSynchronize(), "create");
check_cuda(hipMemcpy(MD->gind,MD->ind,A->size*sizeof(int),hipMemcpyHostToDevice),"copy map");
check_cuda(hipDeviceSynchronize(), "copy");
}
if (mode=="mean") {
B->fill_(0.0);
setDims(A);
hipLaunchKernelGGL(( reduce_mean), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
B->div_(s);
}
else if (mode=="variance") {
Tensor *C=new Tensor(B->getShape(),B->device);
C->fill_(0.0);
setDims(A);
hipLaunchKernelGGL(( reduce_mean), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,C->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
C->div_(s);
gpu_reduce_op(A,C,"diff",MD);
A->sqr_();
B->fill_(0.0);
hipLaunchKernelGGL(( reduce_mean), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean_var");
B->div_(s);
delete C;
}
else {
throw std::invalid_argument("mode: " + mode + " not yet implemented");
}
}
void gpu_reduce_op(Tensor *A, Tensor *B,string op,int *map)
{
int device=A->gpu_device;
int *gmap;
hipSetDevice(device);
check_cuda(hipMalloc((void**)&(gmap),A->size*sizeof(int)),"create map");
check_cuda(hipDeviceSynchronize(), "create");
check_cuda(hipMemcpy(gmap,map,A->size*sizeof(int),hipMemcpyHostToDevice),"copy map");
check_cuda(hipDeviceSynchronize(), "copy");
if (op=="sum") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_sum), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else if (op=="diff") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_diff), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else if (op=="mult") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_mult), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else if (op=="div") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_div), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,gmap,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else {
throw std::invalid_argument("op: " + op + " not yet implemented");
}
check_cuda(hipFree(gmap),"delete_map");
}
void gpu_reduce_op(Tensor *A, Tensor *B,string op,MapReduceDescriptor *MD)
{
int device=A->gpu_device;
hipSetDevice(device);
if (MD->gind==nullptr) {
check_cuda(hipMalloc((void**)&(MD->gind),A->size*sizeof(int)),"create map");
check_cuda(hipDeviceSynchronize(), "create");
check_cuda(hipMemcpy(MD->gind,MD->ind,A->size*sizeof(int),hipMemcpyHostToDevice),"copy map");
check_cuda(hipDeviceSynchronize(), "copy");
}
if (op=="sum") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_sum), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else if (op=="diff") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_diff), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else if (op=="mult") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_mult), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else if (op=="div") {
setDims(A);
hipLaunchKernelGGL(( reduce_op_div), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,MD->gind,A->size);
check_cuda(hipDeviceSynchronize(),"reduce_mean");
}
else {
throw std::invalid_argument("op: " + op + " not yet implemented");
}
}
void gpu_reduce_sum2D(Tensor *A,Tensor *B,int axis,int incB){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
if (!incB) gpu_fill_(B,0.0);
hipLaunchKernelGGL(( reduce_sum2D), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->shape[0],A->shape[1],axis);
check_cuda(hipDeviceSynchronize(),"reduce_sum2D");
}
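// gpu_reduction: when the reduction size per group dominates (the "fast" test
// below) and the operation is a mean with keepdims, the input is permuted so
// that each group is contiguous, every group is summed with thrust::reduce and
// its mean cached in RD->red, and the means are then broadcast back over the
// output. Otherwise a generic kernel runs with one single-thread block per
// output index, which is still slow for max/min reductions on conv shapes.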
void gpu_reduction(ReduceDescriptor *RD){
int device=RD->I->gpu_device;
hipSetDevice(device);
int i,j,d,s,p;
// [MEAN]: Compute items to be reduced
if (RD->m==0) {
d=1;
for(i=0;i<RD->axis.size();i++){
d *= RD->I->shape[RD->axis[i]];
}
}
//////// Init
if (RD->ind==nullptr) {
RD->red_size=RD->index[0].size();
s=RD->index.size()*RD->red_size;
int *ind=(int *)malloc(s*sizeof(int));
for(i=0;i<RD->index.size();i++) {
p=i*RD->red_size;
for(j=0;j<RD->index[i].size();j++,p++)
ind[p]=RD->index[i][j];
}
if (RD->m<2) RD->S=RD->O;
check_cuda(hipMalloc((void**)&(RD->ind),s*sizeof(int)),"create_index");
check_cuda(hipDeviceSynchronize(), "create ind");
check_cuda(hipMemcpy(RD->ind,ind,s*sizeof(int),hipMemcpyHostToDevice),"copy ind");
check_cuda(hipDeviceSynchronize(), "copy");
check_cuda(hipMalloc((void**)&(RD->red),RD->index.size()*sizeof(float)),"create_tensor");
free(ind);
}
/////////////
int fast=0;
if (RD->factor*RD->index.size()<RD->red_size) fast=1;
if ((fast)&&((RD->m==0)&&(RD->keepdims))) {//mean with keepdims=true (BN)
setDims(RD->O);
hipLaunchKernelGGL(( reduction_permute), dim3(dimGrid),dim3(dimBlock), 0, 0, RD->I->ptr, RD->O->ptr, RD->ind, RD->O->size);
check_cuda(hipDeviceSynchronize(), "reduction_kernel");
for(int i=0;i<RD->index.size();i++) {
float *ptr=RD->O->ptr+(i*RD->red_size);
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(ptr);
thrust::device_ptr<float> base = thrust::device_pointer_cast(RD->red);
float sum = thrust::reduce(dev_ptr, dev_ptr + RD->red_size);
thrust::fill(base + i, base + i + 1, (float)sum/RD->red_size);
}
hipLaunchKernelGGL(( reduction_kernel_keep), dim3(dimGrid),dim3(dimBlock), 0, 0, RD->red, RD->O->ptr,RD->ind, RD->index.size(),RD->red_size);
check_cuda(hipDeviceSynchronize(), "reduction_kernel");
}else{ // still slow for max, min on conv
RD->O->fill_(0.0);
dim3 dimGrid(RD->index.size());
dim3 dimBlock(1);
hipLaunchKernelGGL(( reduction_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, RD->I->ptr, RD->O->ptr, RD->S->ptr,RD->m, RD->keepdims,d,RD->ind,RD->red_size);
check_cuda(hipDeviceSynchronize(), "reduction_kernel");
}
}
void gpu_reduction_back(ReduceDescriptor *RD){
int device=RD->I->gpu_device;
hipSetDevice(device);
int d,i;
// [MEAN]: Compute items to be reduced
if (RD->m==0) {
d=1;
for(i=0;i<RD->axis.size();i++){
d *= RD->I->shape[RD->axis[i]];
}
}
int fast=0;
if (RD->factor*RD->index.size()<RD->red_size) fast=1;
if ((fast)&&((RD->m==0)&&(RD->keepdims))) {// mean with keepdims=true (BN)
float *aux;
check_cuda(hipMalloc((void**)&aux,RD->D->size*sizeof(float)),"create_tensor");
setDims(RD->D);
hipLaunchKernelGGL(( reduction_permute), dim3(dimGrid),dim3(dimBlock), 0, 0, RD->D->ptr, aux, RD->ind, RD->O->size);
check_cuda(hipDeviceSynchronize(), "reduction_kernel");
for(int i=0;i<RD->index.size();i++) {
float *ptr=aux+(i*RD->red_size);
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(ptr);
thrust::device_ptr<float> base = thrust::device_pointer_cast(RD->red);
float sum = thrust::reduce(dev_ptr, dev_ptr + RD->red_size);
thrust::fill(base+i, base + i + 1, (float)sum/RD->red_size);
}
check_cuda(hipFree(aux),"delete_tensor");
hipLaunchKernelGGL(( reduction_kernel_keep_inc), dim3(dimGrid),dim3(dimBlock), 0, 0, RD->red, RD->ID->ptr, RD->ind, RD->index.size(),RD->red_size);
check_cuda(hipDeviceSynchronize(), "reduction_kernel");
}else{ // still slow for max, min on conv
dim3 dimGrid(RD->index.size());
dim3 dimBlock(1);
hipLaunchKernelGGL(( reduction_back_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, RD->D->ptr, RD->ID->ptr, RD->S->ptr,RD->m, RD->keepdims,d,RD->ind,RD->red_size);
check_cuda(hipDeviceSynchronize(), "reduction_kernel");
}
}
| 37b664f88d844e26de95f346e50e8ad92258e84c.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.3
* copyright (c) 2019, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: October 2019
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
#include <thrust/device_ptr.h>
//#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <stdexcept>
#include "gpu_tensor.h"
#include "gpu_kernels.h"
#include "gpu_hw.h"
#include "../../tensor/tensor.h"
#include "../../descriptors/descriptors.h"
void gpu_reduce(Tensor *A, Tensor *B,string mode,int* map)
{
int device=A->gpu_device;
int *gmap;
cudaSetDevice(device);
int s=A->size/B->size;
check_cuda(cudaMalloc((void**)&(gmap),A->size*sizeof(int)),"create map");
check_cuda(cudaDeviceSynchronize(), "create");
check_cuda(cudaMemcpy(gmap,map,A->size*sizeof(int),cudaMemcpyHostToDevice),"copy map");
check_cuda(cudaDeviceSynchronize(), "copy");
if (mode=="mean") {
B->fill_(0.0);
setDims(A);
reduce_mean<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
B->div_(s);
}
else if (mode=="variance") {
Tensor *C=new Tensor(B->getShape(),B->device);
C->fill_(0.0);
setDims(A);
reduce_mean<<<dimGrid,dimBlock>>>(A->ptr,C->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
C->div_(s);
gpu_reduce_op(A,C,"diff",map);
A->sqr_();
B->fill_(0.0);
reduce_mean<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean_var");
B->div_(s);
delete C;
}
else {
throw std::invalid_argument("mode: " + mode + " not yet implemented");
}
check_cuda(cudaFree(gmap),"delete_map");
}
void gpu_reduce(Tensor *A, Tensor *B,string mode,MapReduceDescriptor *MD)
{
int device=A->gpu_device;
cudaSetDevice(device);
int s=A->size/B->size;
if (MD->gind==nullptr) {
check_cuda(cudaMalloc((void**)&(MD->gind),A->size*sizeof(int)),"create map");
check_cuda(cudaDeviceSynchronize(), "create");
check_cuda(cudaMemcpy(MD->gind,MD->ind,A->size*sizeof(int),cudaMemcpyHostToDevice),"copy map");
check_cuda(cudaDeviceSynchronize(), "copy");
}
if (mode=="mean") {
B->fill_(0.0);
setDims(A);
reduce_mean<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
B->div_(s);
}
else if (mode=="variance") {
Tensor *C=new Tensor(B->getShape(),B->device);
C->fill_(0.0);
setDims(A);
reduce_mean<<<dimGrid,dimBlock>>>(A->ptr,C->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
C->div_(s);
gpu_reduce_op(A,C,"diff",MD);
A->sqr_();
B->fill_(0.0);
reduce_mean<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean_var");
B->div_(s);
delete C;
}
else {
throw std::invalid_argument("mode: " + mode + " not yet implemented");
}
}
void gpu_reduce_op(Tensor *A, Tensor *B,string op,int *map)
{
int device=A->gpu_device;
int *gmap;
cudaSetDevice(device);
check_cuda(cudaMalloc((void**)&(gmap),A->size*sizeof(int)),"create map");
check_cuda(cudaDeviceSynchronize(), "create");
check_cuda(cudaMemcpy(gmap,map,A->size*sizeof(int),cudaMemcpyHostToDevice),"copy map");
check_cuda(cudaDeviceSynchronize(), "copy");
if (op=="sum") {
setDims(A);
reduce_op_sum<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else if (op=="diff") {
setDims(A);
reduce_op_diff<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else if (op=="mult") {
setDims(A);
reduce_op_mult<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else if (op=="div") {
setDims(A);
reduce_op_div<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,gmap,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else {
throw std::invalid_argument("op: " + op + " not yet implemented");
}
check_cuda(cudaFree(gmap),"delete_map");
}
void gpu_reduce_op(Tensor *A, Tensor *B,string op,MapReduceDescriptor *MD)
{
int device=A->gpu_device;
cudaSetDevice(device);
if (MD->gind==nullptr) {
check_cuda(cudaMalloc((void**)&(MD->gind),A->size*sizeof(int)),"create map");
check_cuda(cudaDeviceSynchronize(), "create");
check_cuda(cudaMemcpy(MD->gind,MD->ind,A->size*sizeof(int),cudaMemcpyHostToDevice),"copy map");
check_cuda(cudaDeviceSynchronize(), "copy");
}
if (op=="sum") {
setDims(A);
reduce_op_sum<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else if (op=="diff") {
setDims(A);
reduce_op_diff<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else if (op=="mult") {
setDims(A);
reduce_op_mult<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else if (op=="div") {
setDims(A);
reduce_op_div<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,MD->gind,A->size);
check_cuda(cudaDeviceSynchronize(),"reduce_mean");
}
else {
throw std::invalid_argument("op: " + op + " not yet implemented");
}
}
void gpu_reduce_sum2D(Tensor *A,Tensor *B,int axis,int incB){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
if (!incB) gpu_fill_(B,0.0);
reduce_sum2D<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->shape[0],A->shape[1],axis);
check_cuda(cudaDeviceSynchronize(),"reduce_sum2D");
}
void gpu_reduction(ReduceDescriptor *RD){
int device=RD->I->gpu_device;
cudaSetDevice(device);
int i,j,d,s,p;
// [MEAN]: Compute items to be reduced
if (RD->m==0) {
d=1;
for(i=0;i<RD->axis.size();i++){
d *= RD->I->shape[RD->axis[i]];
}
}
//////// Init
if (RD->ind==nullptr) {
RD->red_size=RD->index[0].size();
s=RD->index.size()*RD->red_size;
int *ind=(int *)malloc(s*sizeof(int));
for(i=0;i<RD->index.size();i++) {
p=i*RD->red_size;
for(j=0;j<RD->index[i].size();j++,p++)
ind[p]=RD->index[i][j];
}
if (RD->m<2) RD->S=RD->O;
check_cuda(cudaMalloc((void**)&(RD->ind),s*sizeof(int)),"create_index");
check_cuda(cudaDeviceSynchronize(), "create ind");
check_cuda(cudaMemcpy(RD->ind,ind,s*sizeof(int),cudaMemcpyHostToDevice),"copy ind");
check_cuda(cudaDeviceSynchronize(), "copy");
check_cuda(cudaMalloc((void**)&(RD->red),RD->index.size()*sizeof(float)),"create_tensor");
free(ind);
}
/////////////
int fast=0;
if (RD->factor*RD->index.size()<RD->red_size) fast=1;
if ((fast)&&((RD->m==0)&&(RD->keepdims))) {//mean with keepdims=true (BN)
setDims(RD->O);
reduction_permute<<<dimGrid,dimBlock>>>(RD->I->ptr, RD->O->ptr, RD->ind, RD->O->size);
check_cuda(cudaDeviceSynchronize(), "reduction_kernel");
for(int i=0;i<RD->index.size();i++) {
float *ptr=RD->O->ptr+(i*RD->red_size);
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(ptr);
thrust::device_ptr<float> base = thrust::device_pointer_cast(RD->red);
float sum = thrust::reduce(dev_ptr, dev_ptr + RD->red_size);
thrust::fill(base + i, base + i + 1, (float)sum/RD->red_size);
}
reduction_kernel_keep<<<dimGrid,dimBlock>>>(RD->red, RD->O->ptr,RD->ind, RD->index.size(),RD->red_size);
check_cuda(cudaDeviceSynchronize(), "reduction_kernel");
}else{ // still slow for max, min on conv
RD->O->fill_(0.0);
dim3 dimGrid(RD->index.size());
dim3 dimBlock(1);
reduction_kernel<<<dimGrid,dimBlock>>>(RD->I->ptr, RD->O->ptr, RD->S->ptr,RD->m, RD->keepdims,d,RD->ind,RD->red_size);
check_cuda(cudaDeviceSynchronize(), "reduction_kernel");
}
}
void gpu_reduction_back(ReduceDescriptor *RD){
int device=RD->I->gpu_device;
cudaSetDevice(device);
int d,i;
// [MEAN]: Compute items to be reduced
if (RD->m==0) {
d=1;
for(i=0;i<RD->axis.size();i++){
d *= RD->I->shape[RD->axis[i]];
}
}
int fast=0;
if (RD->factor*RD->index.size()<RD->red_size) fast=1;
if ((fast)&&((RD->m==0)&&(RD->keepdims))) {// mean with keepdims=true (BN)
float *aux;
check_cuda(cudaMalloc((void**)&aux,RD->D->size*sizeof(float)),"create_tensor");
setDims(RD->D);
reduction_permute<<<dimGrid,dimBlock>>>(RD->D->ptr, aux, RD->ind, RD->O->size);
check_cuda(cudaDeviceSynchronize(), "reduction_kernel");
for(int i=0;i<RD->index.size();i++) {
float *ptr=aux+(i*RD->red_size);
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(ptr);
thrust::device_ptr<float> base = thrust::device_pointer_cast(RD->red);
float sum = thrust::reduce(dev_ptr, dev_ptr + RD->red_size);
thrust::fill(base+i, base + i + 1, (float)sum/RD->red_size);
}
check_cuda(cudaFree(aux),"delete_tensor");
reduction_kernel_keep_inc<<<dimGrid,dimBlock>>>(RD->red, RD->ID->ptr, RD->ind, RD->index.size(),RD->red_size);
check_cuda(cudaDeviceSynchronize(), "reduction_kernel");
}else{ // still slow for max, min on conv
dim3 dimGrid(RD->index.size());
dim3 dimBlock(1);
reduction_back_kernel<<<dimGrid,dimBlock>>>(RD->D->ptr, RD->ID->ptr, RD->S->ptr,RD->m, RD->keepdims,d,RD->ind,RD->red_size);
check_cuda(cudaDeviceSynchronize(), "reduction_kernel");
}
}
|
678d0435b730999af8802c154cc28200ac737d71.hip | // !!! This is a file automatically generated by hipify!!!
/*
* cuTS: Scaling Subgraph Isomorphism on Distributed Multi-GPU Systems Using
* Trie Based Data Structure
*
* Copyright (C) 2021 APPL Laboratories ([email protected])
*
* This software is available under the MIT license, a copy of which can be
* found in the file 'LICENSE' in the top-level directory.
*
* For further information contact:
* (1) Lizhi Xiang ([email protected])
* (2) Aravind Sukumaran-Rajam ([email protected])
*
* The citation information is provided in the 'README' in the top-level
* directory.
*/
#include "../inc/free_memories.h"
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cout<<hipGetErrorString(code)<<std::endl;
exit(-1);
}
}
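// chkerr aborts the process on the first failing HIP call, so the release
// helpers below fail fast instead of silently leaking device memory.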
void free_graph_gpu_memory(G_pointers &p){
chkerr(hipFree(p.neighbors));
chkerr(hipFree(p.r_neighbors));
chkerr(hipFree(p.neighbors_offset));
chkerr(hipFree(p.r_neighbors_offset));
chkerr(hipFree(p.signatures));
}
void free_query_constraints_gpu_memory(C_pointers &p){
chkerr(hipFree(p.order_sqeuence));
chkerr(hipFree(p.children));
chkerr(hipFree(p.children_offset));
chkerr(hipFree(p.parents));
chkerr(hipFree(p.parents_offset));
}
void free_other_searching_gpu_memory(S_pointers &p){
chkerr(hipFree(p.indexes_table));
chkerr(hipFree(p.results_table));
chkerr(hipFree(p.helper_buffer1));
chkerr(hipFree(p.helper_buffer2));
chkerr(hipFree(p.lengths));
} | 678d0435b730999af8802c154cc28200ac737d71.cu | /*
* cuTS: Scaling Subgraph Isomorphism on Distributed Multi-GPU Systems Using
* Trie Based Data Structure
*
* Copyright (C) 2021 APPL Laboratories ([email protected])
*
* This software is available under the MIT license, a copy of which can be
* found in the file 'LICENSE' in the top-level directory.
*
* For further information contact:
* (1) Lizhi Xiang ([email protected])
* (2) Aravind Sukumaran-Rajam ([email protected])
*
* The citation information is provided in the 'README' in the top-level
* directory.
*/
#include "../inc/free_memories.h"
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cout<<cudaGetErrorString(code)<<std::endl;
exit(-1);
}
}
void free_graph_gpu_memory(G_pointers &p){
chkerr(cudaFree(p.neighbors));
chkerr(cudaFree(p.r_neighbors));
chkerr(cudaFree(p.neighbors_offset));
chkerr(cudaFree(p.r_neighbors_offset));
chkerr(cudaFree(p.signatures));
}
void free_query_constraints_gpu_memory(C_pointers &p){
chkerr(cudaFree(p.order_sqeuence));
chkerr(cudaFree(p.children));
chkerr(cudaFree(p.children_offset));
chkerr(cudaFree(p.parents));
chkerr(cudaFree(p.parents_offset));
}
void free_other_searching_gpu_memory(S_pointers &p){
chkerr(cudaFree(p.indexes_table));
chkerr(cudaFree(p.results_table));
chkerr(cudaFree(p.helper_buffer1));
chkerr(cudaFree(p.helper_buffer2));
chkerr(cudaFree(p.lengths));
} |
3b3371f6e323fe9ca770f30d7182dbde338f24d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// transpose.cpp
#include <nbla/array.hpp>
#include <nbla/variable.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/transpose.hpp>
namespace nbla {
template <typename T>
__global__ void
kernel_transpose_forward(const int num, const int ndim, const int64_t *axes,
const int64_t *x_strides, const int64_t *y_strides,
const int64_t *y_shape, const T *x, T *y) {
NBLA_CUDA_KERNEL_LOOP(o, num) {
int i = 0;
for (int d = 0; d < ndim; ++d) {
const int k = int(o / y_strides[d]) % y_shape[d];
i += k * x_strides[axes[d]];
}
y[o] = x[i];
}
}
template <typename T, bool accum>
__global__ void
kernel_transpose_backward(const int num, const int ndim, const int64_t *axes,
const int64_t *x_strides, const int64_t *y_strides,
const int64_t *y_shape, const T *dy, T *dx) {
NBLA_CUDA_KERNEL_LOOP(o, num) {
int i = 0;
for (int d = 0; d < ndim; ++d) {
const int k = int(o / y_strides[d]) % y_shape[d];
i += k * x_strides[axes[d]];
}
dx[i] = (accum ? dx[i] : (T)0) + dy[o];
}
}
template <typename T>
void TransposeCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Transpose<T>::setup_impl(inputs, outputs);
}
template <class T>
void TransposeCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
// To avoid compiler error : type name is not allowed.
// The following statement causes a compiler error.
// this->v_axes_.get_data_pointer<int64_t>(this->ctx_)
auto get_ = [this](Variable &var) {
return var.get_data_pointer<int64_t>(this->ctx_);
};
const int64_t *axes = get_(this->v_axes_);
const int64_t *x_strides = get_(this->v_x_strides_);
const int64_t *y_strides = get_(this->v_y_strides_);
const int64_t *y_shape = get_(this->v_y_shape_);
const int ndim = inputs[0]->ndim();
const int size = outputs[0]->size();
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_transpose_forward, size, ndim, axes,
x_strides, y_strides, y_shape, x, y);
}
template <class T>
void TransposeCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0])
return;
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]);
// To avoid compiler error : type name is not allowed.
// The following statement causes a compiler error.
// this->v_axes_.get_data_pointer<int64_t>(this->ctx_)
auto get_ = [this](Variable &var) {
return var.get_data_pointer<int64_t>(this->ctx_);
};
const int64_t *axes = get_(this->v_axes_);
const int64_t *x_strides = get_(this->v_x_strides_);
const int64_t *y_strides = get_(this->v_y_strides_);
const int64_t *y_shape = get_(this->v_y_shape_);
const int ndim = inputs[0]->ndim();
const int size = outputs[0]->size();
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_transpose_backward<Tc, true>), size,
ndim, axes, x_strides, y_strides, y_shape,
dy, dx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_transpose_backward<Tc, false>), size,
ndim, axes, x_strides, y_strides, y_shape,
dy, dx);
}
}
}
| 3b3371f6e323fe9ca770f30d7182dbde338f24d1.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// transpose.cpp
#include <nbla/array.hpp>
#include <nbla/variable.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/transpose.hpp>
namespace nbla {
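// kernel_transpose_forward maps each output element back to its source: the
// linear output index o is decoded into per-dimension coordinates using
// y_strides/y_shape, and coordinate d is weighted by the stride of the input
// axis it originates from, x_strides[axes[d]]. For a plain 2-D transpose with
// axes = (1, 0) this reduces to y[r][c] = x[c][r].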
template <typename T>
__global__ void
kernel_transpose_forward(const int num, const int ndim, const int64_t *axes,
const int64_t *x_strides, const int64_t *y_strides,
const int64_t *y_shape, const T *x, T *y) {
NBLA_CUDA_KERNEL_LOOP(o, num) {
int i = 0;
for (int d = 0; d < ndim; ++d) {
const int k = int(o / y_strides[d]) % y_shape[d];
i += k * x_strides[axes[d]];
}
y[o] = x[i];
}
}
template <typename T, bool accum>
__global__ void
kernel_transpose_backward(const int num, const int ndim, const int64_t *axes,
const int64_t *x_strides, const int64_t *y_strides,
const int64_t *y_shape, const T *dy, T *dx) {
NBLA_CUDA_KERNEL_LOOP(o, num) {
int i = 0;
for (int d = 0; d < ndim; ++d) {
const int k = int(o / y_strides[d]) % y_shape[d];
i += k * x_strides[axes[d]];
}
dx[i] = (accum ? dx[i] : (T)0) + dy[o];
}
}
template <typename T>
void TransposeCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Transpose<T>::setup_impl(inputs, outputs);
}
template <class T>
void TransposeCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
// To avoid compiler error : type name is not allowed.
// The following statement causes a compiler error.
// this->v_axes_.get_data_pointer<int64_t>(this->ctx_)
auto get_ = [this](Variable &var) {
return var.get_data_pointer<int64_t>(this->ctx_);
};
const int64_t *axes = get_(this->v_axes_);
const int64_t *x_strides = get_(this->v_x_strides_);
const int64_t *y_strides = get_(this->v_y_strides_);
const int64_t *y_shape = get_(this->v_y_shape_);
const int ndim = inputs[0]->ndim();
const int size = outputs[0]->size();
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_transpose_forward, size, ndim, axes,
x_strides, y_strides, y_shape, x, y);
}
template <class T>
void TransposeCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0])
return;
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]);
// To avoid compiler error : type name is not allowed.
// The following statement causes a compiler error.
// this->v_axes_.get_data_pointer<int64_t>(this->ctx_)
auto get_ = [this](Variable &var) {
return var.get_data_pointer<int64_t>(this->ctx_);
};
const int64_t *axes = get_(this->v_axes_);
const int64_t *x_strides = get_(this->v_x_strides_);
const int64_t *y_strides = get_(this->v_y_strides_);
const int64_t *y_shape = get_(this->v_y_shape_);
const int ndim = inputs[0]->ndim();
const int size = outputs[0]->size();
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_transpose_backward<Tc, true>), size,
ndim, axes, x_strides, y_strides, y_shape,
dy, dx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_transpose_backward<Tc, false>), size,
ndim, axes, x_strides, y_strides, y_shape,
dy, dx);
}
}
}
|
23e8b51ceb2cc69573aec9d057a15920c7a65514.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <ctime>
#include <chrono>
#include <random>
#define N 100000000
#define TIMING
#ifdef TIMING
#define INIT_TIMER auto start = std::chrono::high_resolution_clock::now();
#define START_TIMER start = std::chrono::high_resolution_clock::now();
#define STOP_TIMER std::cout << "Runtime of " << N << ": " << \
std::chrono::duration_cast<std::chrono::milliseconds>( \
std::chrono::high_resolution_clock::now()-start \
).count() << " ms " << std:: endl;
#else
#define INIT_TIMER
#define START_TIMER
#define STOP_TIMER
#endif
__global__ void buildPoint(double *a, double *b, double *c) {
if(pow(a[blockIdx.x], 2) + pow(b[blockIdx.x],2) <= 0.25) {
c[blockIdx.x] = 1.0;
}
}
int main(void) {
INIT_TIMER
START_TIMER
std::random_device rd;
std::mt19937 eng(rd());
std::uniform_real_distribution<> distr(-0.5, 0.5);
double *a, *b, *c;
double *d_a, *d_b, *d_c;
int size = N * sizeof(double);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
  hipMalloc((void **)&d_c, size);
  // d_c is only written for points that land inside the circle, so it must be
  // zeroed first or the untouched entries hold garbage when summed on the host.
  hipMemset(d_c, 0, size);
a = (double *)malloc(size);
b = (double *)malloc(size);
c = (double *)malloc(size);
for(int i = 0; i < N; ++i) {
a[i] = distr(eng);
b[i] = distr(eng);
}
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( buildPoint), dim3(N),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
double pointsIn = 0;
for(int i = 0; i < N; ++i)
pointsIn += c[i];
STOP_TIMER
std::cout.precision(9);
std::cout << (4 * pointsIn) / N << std::endl;
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | 23e8b51ceb2cc69573aec9d057a15920c7a65514.cu | #include <iostream>
#include <cmath>
#include <ctime>
#include <chrono>
#include <random>
#define N 100000000
#define TIMING
#ifdef TIMING
#define INIT_TIMER auto start = std::chrono::high_resolution_clock::now();
#define START_TIMER start = std::chrono::high_resolution_clock::now();
#define STOP_TIMER std::cout << "Runtime of " << N << ": " << \
std::chrono::duration_cast<std::chrono::milliseconds>( \
std::chrono::high_resolution_clock::now()-start \
).count() << " ms " << std:: endl;
#else
#define INIT_TIMER
#define START_TIMER
#define STOP_TIMER
#endif
__global__ void buildPoint(double *a, double *b, double *c) {
if(pow(a[blockIdx.x], 2) + pow(b[blockIdx.x],2) <= 0.25) {
c[blockIdx.x] = 1.0;
}
}
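// Monte Carlo estimate of pi: points are drawn uniformly from the square
// [-0.5, 0.5]^2 and land inside the inscribed circle of radius 0.5 exactly
// when x^2 + y^2 <= 0.25. The hit ratio approximates the area ratio
// (pi * 0.25) / 1 = pi / 4, hence the final estimate 4 * pointsIn / N.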
int main(void) {
INIT_TIMER
START_TIMER
std::random_device rd;
std::mt19937 eng(rd());
std::uniform_real_distribution<> distr(-0.5, 0.5);
double *a, *b, *c;
double *d_a, *d_b, *d_c;
int size = N * sizeof(double);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
  cudaMalloc((void **)&d_c, size);
  // d_c is only written for points that land inside the circle, so it must be
  // zeroed first or the untouched entries hold garbage when summed on the host.
  cudaMemset(d_c, 0, size);
a = (double *)malloc(size);
b = (double *)malloc(size);
c = (double *)malloc(size);
for(int i = 0; i < N; ++i) {
a[i] = distr(eng);
b[i] = distr(eng);
}
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
buildPoint<<<N,1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
double pointsIn = 0;
for(int i = 0; i < N; ++i)
pointsIn += c[i];
STOP_TIMER
std::cout.precision(9);
std::cout << (4 * pointsIn) / N << std::endl;
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} |
fcafd42422c0a15709a9513b8226b5e83d1d41b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaDoubleComplex* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, magmaDoubleComplex x[][BLOCK_SIZEy+1] )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__
void magma_zlarf_kernel( int m, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm )
{
if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) {
const int i = threadIdx.x;
magmaDoubleComplex *dc = c + blockIdx.x * ldc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* w := v' * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0)
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dc[j] );
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CNJG(*tau) * sum[0];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * v[j];
}
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
double temp = MAGMA_Z_ABS( dc[0] ) / xnorm[blockIdx.x];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp);
}
}
}
//==============================================================================
__global__
void magma_zlarf_smkernel( int m, int n, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm )
{
if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k+= BLOCK_SIZEy)
{
magmaDoubleComplex *dc = c + k * ldc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
magmaDoubleComplex lsum;
/* w := v' * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ){
if (j==0)
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dc[j] );
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( v[j] ), dc[j] );
}
sum[i][col] = lsum;
sum_reduce_2d< BLOCK_SIZEx >( i, col, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CNJG(*tau) * sum[0][col];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * v[j];
}
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
double temp = MAGMA_Z_ABS( dc[0] ) / xnorm[k];
temp = (temp + 1.) * (1. - temp);
xnorm[k] = xnorm[k] * sqrt(temp);
}
}
}
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_zlarf_sm(int m, int n, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magma_zlarf_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, v, tau, c, ldc, xnorm);
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference with the
LAPACK's zlarf routine.
*/
extern "C" magma_int_t
magma_zlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, magma_int_t ldc, double *xnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_zlarf_kernel), dim3(blocks), dim3(threads), 0, magma_stream , m, v, tau, c, ldc, xnorm);
// The computation can be done on 1 SM with the following routine.
// magma_zlarf_sm(m, n, v, tau, c, ldc, xnorm);
return MAGMA_SUCCESS;
}
//==============================================================================
| fcafd42422c0a15709a9513b8226b5e83d1d41b2.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaDoubleComplex* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, magmaDoubleComplex x[][BLOCK_SIZEy+1] )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
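// magma_zlarf_kernel applies the reflector one column per thread block:
// threads accumulate partial sums of conj(v)' * dc (the first entry of v is
// treated as an implicit 1), sum_reduce combines them in shared memory, and
// the column is then updated as dc -= conj(tau) * sum * v. Finally thread 0
// downdates the stored column norm by the factor sqrt((1 + t) * (1 - t)),
// where t = |dc[0]| / xnorm[blockIdx.x].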
__global__
void magma_zlarf_kernel( int m, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm )
{
if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) {
const int i = threadIdx.x;
magmaDoubleComplex *dc = c + blockIdx.x * ldc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* w := v' * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0)
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dc[j] );
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CNJG(*tau) * sum[0];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * v[j];
}
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
double temp = MAGMA_Z_ABS( dc[0] ) / xnorm[blockIdx.x];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp);
}
}
}
//==============================================================================
__global__
void magma_zlarf_smkernel( int m, int n, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm )
{
if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k+= BLOCK_SIZEy)
{
magmaDoubleComplex *dc = c + k * ldc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
magmaDoubleComplex lsum;
/* w := v' * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ){
if (j==0)
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dc[j] );
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( v[j] ), dc[j] );
}
sum[i][col] = lsum;
sum_reduce_2d< BLOCK_SIZEx >( i, col, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CNJG(*tau) * sum[0][col];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * v[j];
}
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
double temp = MAGMA_Z_ABS( dc[0] ) / xnorm[k];
temp = (temp + 1.) * (1. - temp);
xnorm[k] = xnorm[k] * sqrt(temp);
}
}
}
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_zlarf_sm(int m, int n, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magma_zlarf_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, v, tau, c, ldc, xnorm);
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference with the
LAPACK's zlarf routine.
*/
extern "C" magma_int_t
magma_zlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, magma_int_t ldc, double *xnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magma_zlarf_kernel<<< blocks, threads, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm);
// The computation can be done on 1 SM with the following routine.
// magma_zlarf_sm(m, n, v, tau, c, ldc, xnorm);
return MAGMA_SUCCESS;
}
//==============================================================================
|
b9b02c4395fbc18f799e1c23a66000fce160bbf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "radonusfft.cuh"
#include "kernels_tomo.cu"
#include "shift.hip"
#include "filter.hip"
radonusfft::radonusfft(size_t ntheta, size_t pnz, size_t n, float center,
size_t theta_, size_t ngpus)
: ntheta(ntheta), pnz(pnz), n(n), center(center), ngpus(ngpus) {
float eps = 1e-2;
mu = -log(eps) / (2 * n * n);
m = ceil(2 * n * 1 / PI * sqrt(-mu * log(eps) + (mu * n) * (mu * n) / 4));
f = new float2*[ngpus];
g = new float2*[ngpus];
fdee = new float2*[ngpus];
x = new float*[ngpus];
y = new float*[ngpus];
shiftfwd = new float2*[ngpus];
shiftadj = new float2*[ngpus];
theta = new float*[ngpus];
plan1d = new hipfftHandle[ngpus];
plan2d = new hipfftHandle[ngpus];
for (int igpu=0;igpu<ngpus;igpu++)
{
hipSetDevice(igpu);
hipMalloc((void **)&f[igpu], n * n * pnz * sizeof(float2));
hipMalloc((void **)&g[igpu], n * ntheta * pnz * sizeof(float2));
hipMalloc((void **)&fdee[igpu],
(2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2));
hipMalloc((void **)&x[igpu], n * ntheta * sizeof(float));
hipMalloc((void **)&y[igpu], n * ntheta * sizeof(float));
hipMalloc((void **)&theta[igpu], ntheta * sizeof(float));
hipMemcpy(theta[igpu], (float *)theta_, ntheta * sizeof(float), hipMemcpyDefault);
int ffts[2];
int idist;
int inembed[2];
// fft 2d
ffts[0] = 2 * n;
ffts[1] = 2 * n;
idist = (2 * n + 2 * m) * (2 * n + 2 * m);
inembed[0] = 2 * n + 2 * m;
inembed[1] = 2 * n + 2 * m;
hipfftPlanMany(&plan2d[igpu], 2, ffts, inembed, 1, idist, inembed, 1, idist,
HIPFFT_C2C, pnz);
// fft 1d
ffts[0] = n;
idist = n;
inembed[0] = n;
hipfftPlanMany(&plan1d[igpu], 1, ffts, inembed, 1, idist, inembed, 1, idist,
HIPFFT_C2C, ntheta * pnz);
hipMalloc((void **)&shiftfwd[igpu], n * sizeof(float2));
hipMalloc((void **)&shiftadj[igpu], n * sizeof(float2));
// compute shifts with respect to the rotation center
hipLaunchKernelGGL(( takeshift) , dim3(ceil(n / 1024.0)), dim3(1024), 0, 0, shiftfwd[igpu], -(center - n / 2.0), n);
hipLaunchKernelGGL(( takeshift) , dim3(ceil(n / 1024.0)), dim3(1024), 0, 0, shiftadj[igpu], (center - n / 2.0), n);
}
//back tp 0
hipSetDevice(0);
BS2d = dim3(32, 32);
BS3d = dim3(32, 32, 1);
GS2d0 = dim3(ceil(n / (float)BS2d.x), ceil(ntheta / (float)BS2d.y));
GS3d0 = dim3(ceil(n / (float)BS3d.x), ceil(n / (float)BS3d.y),
ceil(pnz / (float)BS3d.z));
GS3d1 = dim3(ceil(2 * n / (float)BS3d.x), ceil(2 * n / (float)BS3d.y),
ceil(pnz / (float)BS3d.z));
GS3d2 = dim3(ceil((2 * n + 2 * m) / (float)BS3d.x),
ceil((2 * n + 2 * m) / (float)BS3d.y), ceil(pnz / (float)BS3d.z));
GS3d3 = dim3(ceil(n / (float)BS3d.x), ceil(ntheta / (float)BS3d.y),
ceil(pnz / (float)BS3d.z));
}
// destructor, memory deallocation
radonusfft::~radonusfft() { free(); }
void radonusfft::free() {
if (!is_free) {
for(int igpu=0;igpu<ngpus;igpu++)
{
hipSetDevice(igpu);
hipFree(f[igpu]);
hipFree(g[igpu]);
hipFree(fdee[igpu]);
hipFree(x[igpu]);
hipFree(y[igpu]);
hipFree(shiftfwd[igpu]);
hipFree(shiftadj[igpu]);
hipfftDestroy(plan2d[igpu]);
hipfftDestroy(plan1d[igpu]);
}
hipFree(f);
hipFree(g);
hipFree(fdee);
hipFree(x);
hipFree(y);
hipFree(shiftfwd);
hipFree(shiftadj);
is_free = true;
}
}
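// fwd(): compensate for the gridding kernel and zero-pad f into fdee (divphi),
// run a centered 2-D FFT on the oversampled grid, wrap the m-wide apron so the
// kernel support is periodic, gather the spectrum onto the polar samples
// (x, y) given by theta, apply the per-projection phase shift for the rotation
// center, and finish with a centered inverse 1-D FFT along the detector axis
// to produce the projections g.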
void radonusfft::fwd(size_t g_, size_t f_, size_t igpu) {
hipSetDevice(igpu);
float2* f0 = (float2 *)f_;
hipMemcpy(f[igpu], f0, n * n * pnz * sizeof(float2), hipMemcpyDefault);
hipMemset(fdee[igpu], 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2));
//circ <<<GS3d0, BS3d>>> (f, 1.0f / n, n, pnz);
hipLaunchKernelGGL(( takexy) , dim3(GS2d0), dim3(BS2d), 0, 0, x[igpu], y[igpu], theta[igpu], n, ntheta);
hipLaunchKernelGGL(( divphi) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], f[igpu], mu, n, pnz, m, TOMO_FWD);
hipLaunchKernelGGL(( fftshiftc) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], 2 * n + 2 * m, pnz);
hipfftExecC2C(plan2d[igpu], (hipfftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)],
(hipfftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)], HIPFFT_FORWARD);
hipLaunchKernelGGL(( fftshiftc) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], 2 * n + 2 * m, pnz);
hipLaunchKernelGGL(( wrap) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], n, pnz, m, TOMO_FWD);
hipLaunchKernelGGL(( gather) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], fdee[igpu], x[igpu], y[igpu], m, mu, n, ntheta, pnz, TOMO_FWD);
// shift with respect to given center
hipLaunchKernelGGL(( shift) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], shiftfwd[igpu], n, ntheta, pnz);
hipLaunchKernelGGL(( ifftshiftc) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], n, ntheta, pnz);
hipfftExecC2C(plan1d[igpu], (hipfftComplex *)g[igpu], (hipfftComplex *)g[igpu], HIPFFT_BACKWARD);
hipLaunchKernelGGL(( ifftshiftc) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], n, ntheta, pnz);
if(n%4!=0)
hipLaunchKernelGGL(( ifftshiftcmul) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], n, ntheta, pnz);
float2* g0 = (float2 *)g_;
for (int i=0;i<ntheta;i++)
hipMemcpy(&g0[i*n*pnz], &g[igpu][i*n*pnz], n * pnz * sizeof(float2), hipMemcpyDefault);
}
void radonusfft::adj(size_t f_, size_t g_, size_t igpu, bool filter) {
hipSetDevice(igpu);
float2* g0 = (float2 *)g_;
for (int i=0;i<ntheta;i++)
hipMemcpy(&g[igpu][i*n*pnz],&g0[i*n*pnz], n * pnz * sizeof(float2), hipMemcpyDefault);
hipMemset(fdee[igpu], 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2));
hipLaunchKernelGGL(( takexy) , dim3(GS2d0), dim3(BS2d), 0, 0, x[igpu], y[igpu], theta[igpu], n, ntheta);
hipLaunchKernelGGL(( ifftshiftc) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], n, ntheta, pnz);
hipfftExecC2C(plan1d[igpu], (hipfftComplex *)g[igpu], (hipfftComplex *)g[igpu], HIPFFT_FORWARD);
hipLaunchKernelGGL(( ifftshiftc) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], n, ntheta, pnz);
if(n%4!=0)
hipLaunchKernelGGL(( ifftshiftcmul) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], n, ntheta, pnz);
if (filter)
hipLaunchKernelGGL(( applyfilter), dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu],n,ntheta,pnz);
// shift with respect to given center
hipLaunchKernelGGL(( shift) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], shiftadj[igpu], n, ntheta, pnz);
hipLaunchKernelGGL(( gather) , dim3(GS3d3), dim3(BS3d), 0, 0, g[igpu], fdee[igpu], x[igpu], y[igpu], m, mu, n, ntheta, pnz, TOMO_ADJ);
hipLaunchKernelGGL(( wrap) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], n, pnz, m, TOMO_ADJ);
hipLaunchKernelGGL(( fftshiftc) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], 2 * n + 2 * m, pnz);
hipfftExecC2C(plan2d[igpu], (hipfftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)],
(hipfftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)], HIPFFT_BACKWARD);
hipLaunchKernelGGL(( fftshiftc) , dim3(GS3d2), dim3(BS3d), 0, 0, fdee[igpu], 2 * n + 2 * m, pnz);
hipLaunchKernelGGL(( divphi) , dim3(GS3d0), dim3(BS3d), 0, 0, fdee[igpu], f[igpu], mu, n, pnz, m, TOMO_ADJ);
//circ <<<GS3d0, BS3d>>> (f, 1.0f / n, n, pnz);
float2* f0 = (float2 *)f_;
hipMemcpy(f0, f[igpu], n * n * pnz * sizeof(float2),
hipMemcpyDefault);
//}
}
| b9b02c4395fbc18f799e1c23a66000fce160bbf6.cu | #include <stdio.h>
#include "radonusfft.cuh"
#include "kernels_tomo.cu"
#include "shift.cu"
#include "filter.cu"
radonusfft::radonusfft(size_t ntheta, size_t pnz, size_t n, float center,
size_t theta_, size_t ngpus)
: ntheta(ntheta), pnz(pnz), n(n), center(center), ngpus(ngpus) {
float eps = 1e-2;
mu = -log(eps) / (2 * n * n);
m = ceil(2 * n * 1 / PI * sqrt(-mu * log(eps) + (mu * n) * (mu * n) / 4));
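  // eps is the target accuracy of the USFFT interpolation: mu sets the width of
  // the interpolation kernel and m its half-width in grid points (both are used
  // below by divphi/gather/wrap, and m also pads the oversampled array fdee).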
f = new float2*[ngpus];
g = new float2*[ngpus];
fdee = new float2*[ngpus];
x = new float*[ngpus];
y = new float*[ngpus];
shiftfwd = new float2*[ngpus];
shiftadj = new float2*[ngpus];
theta = new float*[ngpus];
plan1d = new cufftHandle[ngpus];
plan2d = new cufftHandle[ngpus];
for (int igpu=0;igpu<ngpus;igpu++)
{
cudaSetDevice(igpu);
cudaMalloc((void **)&f[igpu], n * n * pnz * sizeof(float2));
cudaMalloc((void **)&g[igpu], n * ntheta * pnz * sizeof(float2));
cudaMalloc((void **)&fdee[igpu],
(2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2));
cudaMalloc((void **)&x[igpu], n * ntheta * sizeof(float));
cudaMalloc((void **)&y[igpu], n * ntheta * sizeof(float));
cudaMalloc((void **)&theta[igpu], ntheta * sizeof(float));
cudaMemcpy(theta[igpu], (float *)theta_, ntheta * sizeof(float), cudaMemcpyDefault);
int ffts[2];
int idist;
int inembed[2];
// fft 2d
ffts[0] = 2 * n;
ffts[1] = 2 * n;
idist = (2 * n + 2 * m) * (2 * n + 2 * m);
inembed[0] = 2 * n + 2 * m;
inembed[1] = 2 * n + 2 * m;
cufftPlanMany(&plan2d[igpu], 2, ffts, inembed, 1, idist, inembed, 1, idist,
CUFFT_C2C, pnz);
// fft 1d
ffts[0] = n;
idist = n;
inembed[0] = n;
cufftPlanMany(&plan1d[igpu], 1, ffts, inembed, 1, idist, inembed, 1, idist,
CUFFT_C2C, ntheta * pnz);
cudaMalloc((void **)&shiftfwd[igpu], n * sizeof(float2));
cudaMalloc((void **)&shiftadj[igpu], n * sizeof(float2));
// compute shifts with respect to the rotation center
takeshift <<<ceil(n / 1024.0), 1024>>> (shiftfwd[igpu], -(center - n / 2.0), n);
takeshift <<<ceil(n / 1024.0), 1024>>> (shiftadj[igpu], (center - n / 2.0), n);
}
//back to GPU 0
cudaSetDevice(0);
BS2d = dim3(32, 32);
BS3d = dim3(32, 32, 1);
GS2d0 = dim3(ceil(n / (float)BS2d.x), ceil(ntheta / (float)BS2d.y));
GS3d0 = dim3(ceil(n / (float)BS3d.x), ceil(n / (float)BS3d.y),
ceil(pnz / (float)BS3d.z));
GS3d1 = dim3(ceil(2 * n / (float)BS3d.x), ceil(2 * n / (float)BS3d.y),
ceil(pnz / (float)BS3d.z));
GS3d2 = dim3(ceil((2 * n + 2 * m) / (float)BS3d.x),
ceil((2 * n + 2 * m) / (float)BS3d.y), ceil(pnz / (float)BS3d.z));
GS3d3 = dim3(ceil(n / (float)BS3d.x), ceil(ntheta / (float)BS3d.y),
ceil(pnz / (float)BS3d.z));
}
// destructor, memory deallocation
radonusfft::~radonusfft() { free(); }
void radonusfft::free() {
if (!is_free) {
for(int igpu=0;igpu<ngpus;igpu++)
{
cudaSetDevice(igpu);
cudaFree(f[igpu]);
cudaFree(g[igpu]);
cudaFree(fdee[igpu]);
cudaFree(x[igpu]);
cudaFree(y[igpu]);
cudaFree(shiftfwd[igpu]);
cudaFree(shiftadj[igpu]);
cufftDestroy(plan2d[igpu]);
cufftDestroy(plan1d[igpu]);
}
cudaFree(f);
cudaFree(g);
cudaFree(fdee);
cudaFree(x);
cudaFree(y);
cudaFree(shiftfwd);
cudaFree(shiftadj);
is_free = true;
}
}
void radonusfft::fwd(size_t g_, size_t f_, size_t igpu) {
cudaSetDevice(igpu);
float2* f0 = (float2 *)f_;
cudaMemcpy(f[igpu], f0, n * n * pnz * sizeof(float2), cudaMemcpyDefault);
cudaMemset(fdee[igpu], 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2));
//circ <<<GS3d0, BS3d>>> (f, 1.0f / n, n, pnz);
takexy <<<GS2d0, BS2d>>> (x[igpu], y[igpu], theta[igpu], n, ntheta);
divphi <<<GS3d2, BS3d>>> (fdee[igpu], f[igpu], mu, n, pnz, m, TOMO_FWD);
fftshiftc <<<GS3d2, BS3d>>> (fdee[igpu], 2 * n + 2 * m, pnz);
cufftExecC2C(plan2d[igpu], (cufftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)],
(cufftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)], CUFFT_FORWARD);
fftshiftc <<<GS3d2, BS3d>>> (fdee[igpu], 2 * n + 2 * m, pnz);
wrap <<<GS3d2, BS3d>>> (fdee[igpu], n, pnz, m, TOMO_FWD);
gather <<<GS3d3, BS3d>>> (g[igpu], fdee[igpu], x[igpu], y[igpu], m, mu, n, ntheta, pnz, TOMO_FWD);
// shift with respect to given center
shift <<<GS3d3, BS3d>>> (g[igpu], shiftfwd[igpu], n, ntheta, pnz);
ifftshiftc <<<GS3d3, BS3d>>> (g[igpu], n, ntheta, pnz);
cufftExecC2C(plan1d[igpu], (cufftComplex *)g[igpu], (cufftComplex *)g[igpu], CUFFT_INVERSE);
ifftshiftc <<<GS3d3, BS3d>>> (g[igpu], n, ntheta, pnz);
if(n%4!=0)
ifftshiftcmul <<<GS3d3, BS3d>>> (g[igpu], n, ntheta, pnz);
float2* g0 = (float2 *)g_;
for (int i=0;i<ntheta;i++)
cudaMemcpy(&g0[i*n*pnz], &g[igpu][i*n*pnz], n * pnz * sizeof(float2), cudaMemcpyDefault);
}
void radonusfft::adj(size_t f_, size_t g_, size_t igpu, bool filter) {
cudaSetDevice(igpu);
float2* g0 = (float2 *)g_;
for (int i=0;i<ntheta;i++)
cudaMemcpy(&g[igpu][i*n*pnz],&g0[i*n*pnz], n * pnz * sizeof(float2), cudaMemcpyDefault);
cudaMemset(fdee[igpu], 0, (2 * n + 2 * m) * (2 * n + 2 * m) * pnz * sizeof(float2));
takexy <<<GS2d0, BS2d>>> (x[igpu], y[igpu], theta[igpu], n, ntheta);
ifftshiftc <<<GS3d3, BS3d>>> (g[igpu], n, ntheta, pnz);
cufftExecC2C(plan1d[igpu], (cufftComplex *)g[igpu], (cufftComplex *)g[igpu], CUFFT_FORWARD);
ifftshiftc <<<GS3d3, BS3d>>> (g[igpu], n, ntheta, pnz);
if(n%4!=0)
ifftshiftcmul <<<GS3d3, BS3d>>> (g[igpu], n, ntheta, pnz);
if (filter)
applyfilter<<<GS3d3, BS3d>>>(g[igpu],n,ntheta,pnz);
// shift with respect to given center
shift <<<GS3d3, BS3d>>> (g[igpu], shiftadj[igpu], n, ntheta, pnz);
gather <<<GS3d3, BS3d>>> (g[igpu], fdee[igpu], x[igpu], y[igpu], m, mu, n, ntheta, pnz, TOMO_ADJ);
wrap <<<GS3d2, BS3d>>> (fdee[igpu], n, pnz, m, TOMO_ADJ);
fftshiftc <<<GS3d2, BS3d>>> (fdee[igpu], 2 * n + 2 * m, pnz);
cufftExecC2C(plan2d[igpu], (cufftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)],
(cufftComplex *)&fdee[igpu][m + m * (2 * n + 2 * m)], CUFFT_INVERSE);
fftshiftc <<<GS3d2, BS3d>>> (fdee[igpu], 2 * n + 2 * m, pnz);
divphi <<<GS3d0, BS3d>>> (fdee[igpu], f[igpu], mu, n, pnz, m, TOMO_ADJ);
//circ <<<GS3d0, BS3d>>> (f, 1.0f / n, n, pnz);
float2* f0 = (float2 *)f_;
cudaMemcpy(f0, f[igpu], n * n * pnz * sizeof(float2),
cudaMemcpyDefault);
//}
}
|
44355e85186c8991c326b734b588e19b571c4ab6.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_runtime.h>
#include "orttraining/training_ops/cuda/activation/activations_grad_impl.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
struct OP_GeluGrad : public CtxGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Default{});
}
};
template <>
struct OP_GeluGrad<half> : public CtxGeluGrad {
__device__ __inline__ half operator()(const half& dy, const half& x) const {
return static_cast<half>(
ComputeGeluGradScalar(static_cast<float>(dy), static_cast<float>(x), gelu_computation_mode::Default{}));
}
};
template <typename T>
struct OP_FastGeluGrad : public CtxGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Approximation{});
}
};
template <typename T>
struct OP_ReluGrad : public CtxReluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return x > T{0} ? dy : T{0};
}
};
template <typename T>
struct OP_SigmoidGrad : public CtxSigmoidGrad {
__device__ __inline__ T operator()(const T& dy, const T& y) const {
return dy * y * ((T)1 - y);
}
};
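// QuickGelu(x) = x * sigmoid(alpha * x); with v = alpha * x its derivative is
// sigmoid(v) * (1 + v * (1 - sigmoid(v))), which is what the functor below
// returns scaled by dy. The two-branch sigmoid avoids overflow of _Exp for
// large |v|.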
template <typename T>
struct OP_QuickGeluGrad : public CtxQuickGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
T v = x * static_cast<T>(alpha);
T one = static_cast<T>(1.f);
T zero = static_cast<T>(0.f);
T sigmoid = v >= zero ? one / (one + _Exp(-v)) : one - one / (one + _Exp(v));
return dy * sigmoid * (one + v * (one - sigmoid));
}
};
template <typename T>
struct OP_TanhGrad : public CtxTanhGrad {
__device__ __inline__ T operator()(const T& dy, const T& y) const {
return dy * ((T)1 - y * y);
}
};
template <typename T>
struct OP_LeakyReluGrad : public CtxLeakyReluGrad {
__device__ __inline__ T operator()(const T& dy, const T& y) const {
return dy * (y > T{0} ? T{1} : static_cast<T>(alpha));
}
};
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseNoBroadcastImpl(stream, \
lhs_data, rhs_data, \
output_data, \
*reinterpret_cast<const OP_##name<T>*>(func_ctx), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(name, T) \
template void Impl_##name<T>(hipStream_t stream, const T* lhs_data, const T* rhs_data, T* output_data, const Ctx##name* func_ctx, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define ACTIVATION_GRAD_OP_NAME(name) \
BINARY_ELEMENTWISE_IMPL(name); \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(name)
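// X-macro expansion: ACTIVATION_GRAD_OPS() (declared in the included header) is
// expected to invoke ACTIVATION_GRAD_OP_NAME for each gradient op above, which
// both defines Impl_<name> and instantiates it for half, float and double.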
ACTIVATION_GRAD_OPS()
#undef ACTIVATION_GRAD_OP_NAME
} // namespace cuda
} // namespace onnxruntime
| 44355e85186c8991c326b734b588e19b571c4ab6.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include "orttraining/training_ops/cuda/activation/activations_grad_impl.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
struct OP_GeluGrad : public CtxGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Default{});
}
};
template <>
struct OP_GeluGrad<half> : public CtxGeluGrad {
__device__ __inline__ half operator()(const half& dy, const half& x) const {
return static_cast<half>(
ComputeGeluGradScalar(static_cast<float>(dy), static_cast<float>(x), gelu_computation_mode::Default{}));
}
};
template <typename T>
struct OP_FastGeluGrad : public CtxGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Approximation{});
}
};
template <typename T>
struct OP_ReluGrad : public CtxReluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return x > T{0} ? dy : T{0};
}
};
template <typename T>
struct OP_SigmoidGrad : public CtxSigmoidGrad {
__device__ __inline__ T operator()(const T& dy, const T& y) const {
return dy * y * ((T)1 - y);
}
};
template <typename T>
struct OP_QuickGeluGrad : public CtxQuickGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
T v = x * static_cast<T>(alpha);
T one = static_cast<T>(1.f);
T zero = static_cast<T>(0.f);
T sigmoid = v >= zero ? one / (one + _Exp(-v)) : one - one / (one + _Exp(v));
return dy * sigmoid * (one + v * (one - sigmoid));
}
};
template <typename T>
struct OP_TanhGrad : public CtxTanhGrad {
__device__ __inline__ T operator()(const T& dy, const T& y) const {
return dy * ((T)1 - y * y);
}
};
template <typename T>
struct OP_LeakyReluGrad : public CtxLeakyReluGrad {
__device__ __inline__ T operator()(const T& dy, const T& y) const {
return dy * (y > T{0} ? T{1} : static_cast<T>(alpha));
}
};
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseNoBroadcastImpl(stream, \
lhs_data, rhs_data, \
output_data, \
*reinterpret_cast<const OP_##name<T>*>(func_ctx), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(name, T) \
template void Impl_##name<T>(cudaStream_t stream, const T* lhs_data, const T* rhs_data, T* output_data, const Ctx##name* func_ctx, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define ACTIVATION_GRAD_OP_NAME(name) \
BINARY_ELEMENTWISE_IMPL(name); \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(name)
ACTIVATION_GRAD_OPS()
#undef ACTIVATION_GRAD_OP_NAME
} // namespace cuda
} // namespace onnxruntime
|
9bc765eef2949741060c1683fbccda7f2c52187a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "common/bertCommon.h"
#include "common/common.cuh"
#include <cassert>
#include <cstring>
#include <hip/hip_runtime.h>
#include <type_traits>
#include <vector>
using namespace nvinfer1;
namespace bert
{
inline __device__ void res_add(
float (&hdata)[4], const uint32_t idata, const uint32_t ires, const float dqData, const float dqRes)
{
char4 ires4 = reinterpret_cast<const char4&>(ires);
char4 idata4 = reinterpret_cast<const char4&>(idata);
hdata[0] = float(idata4.x) * dqData + float(ires4.x) * dqRes;
hdata[1] = float(idata4.y) * dqData + float(ires4.y) * dqRes;
hdata[2] = float(idata4.z) * dqData + float(ires4.z) * dqRes;
hdata[3] = float(idata4.w) * dqData + float(ires4.w) * dqRes;
}
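// Fused int8 residual-add + LayerNorm ("hface" layout, hidden size = HEADS * 64):
// both int8 inputs are dequantized and summed (res_add), the per-token mean and
// inverse stddev are reduced across the hidden dimension, and the normalized
// values are scaled by gamma/beta and re-quantized to int8 with qScale. Each CTA
// handles VECS_PER_CTA token positions, two threads per 32B token chunk.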
template <int32_t WARPS, int32_t HEADS, int32_t THREADS_PER_ROW>
__global__ void skipln_vec32_hface(const int8_t* input, const int8_t* skip, int8_t* output, const half* beta,
const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale, const int32_t total)
{
// clang-format off
enum { HEAD_SIZE = 64 };
enum { BYTES_PER_LDG = 16 };
enum { THREADS_PER_CTA = WARPS * 32 };
enum { ROWS_PER_LDG = THREADS_PER_CTA / THREADS_PER_ROW };
enum { VECS_PER_CTA = THREADS_PER_ROW / 2 };
enum { PARAM_BYTES = HEADS * HEAD_SIZE * 2 };
enum { PARAM_LDGS = PARAM_BYTES / (THREADS_PER_CTA * BYTES_PER_LDG) };
enum { LDGS = HEADS * 2 / ROWS_PER_LDG };
// clang-format on
static_assert(VECS_PER_CTA == 4, "");
static_assert(PARAM_LDGS == 1, "");
static_assert(ROWS_PER_LDG == HEADS , "");
static_assert(LDGS == 2, "");
static_assert(LDGS * ROWS_PER_LDG == HEADS * 2, "");
static_assert(THREADS_PER_CTA * BYTES_PER_LDG == PARAM_BYTES, "");
static_assert(PARAM_LDGS == 1, "");
extern __shared__ char smem_[];
// space for CTA-wide reduction
__shared__ half2 smem_red[VECS_PER_CTA][WARPS];
constexpr float rld = 1.f / (float(HEADS) * float(HEAD_SIZE));
const int32_t bidx = blockIdx.x;
const int32_t tidx = threadIdx.x;
const int32_t row = tidx / THREADS_PER_ROW;
const int32_t col = tidx % THREADS_PER_ROW;
const int32_t lane = tidx % 32;
const int32_t warp = tidx / 32;
const bool is_warp_lead = (lane < THREADS_PER_ROW) && ((lane & 1) == 0);
const bool is_cta_lead = (tidx < THREADS_PER_ROW) && ((tidx & 1) == 0);
// token position: every two threads load together the 32B at one token
// position
const int32_t pos = col / 2;
const int32_t pos_offset = bidx * VECS_PER_CTA + pos; // for token positions per block, disabling 2 threads per pos
const bool my_pred = pos_offset < total;
const int32_t row_stride_bytes = total * 32;
uint4 in_data[LDGS];
uint4 in_skip[LDGS];
float hdata[LDGS * 4][4];
const int32_t gmem_offset = row * row_stride_bytes + (bidx * THREADS_PER_ROW + col) * BYTES_PER_LDG;
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii] = {0, 0, 0, 0};
in_skip[ii] = {0, 0, 0, 0};
if (my_pred)
{
ldg(input + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
ldg(skip + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_skip[ii]);
}
}
uint4* smem_b = reinterpret_cast<uint4*>(&smem_[0]) + tidx;
uint4* smem_g = reinterpret_cast<uint4*>(&smem_[PARAM_BYTES]) + tidx;
const int8_t* beta_ptr = reinterpret_cast<const int8_t*>(beta) + tidx * BYTES_PER_LDG;
const int8_t* gamma_ptr = reinterpret_cast<const int8_t*>(gamma) + tidx * BYTES_PER_LDG;
ldg(beta_ptr, *smem_b);
ldg(gamma_ptr, *smem_g);
half* b = reinterpret_cast<half*>(&smem_[0]);
half* g = reinterpret_cast<half*>(&smem_[PARAM_BYTES]);
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
res_add(hdata[ii * 4 + 0], in_data[ii].x, in_skip[ii].x, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 1], in_data[ii].y, in_skip[ii].y, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 2], in_data[ii].z, in_skip[ii].z, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 3], in_data[ii].w, in_skip[ii].w, dqScaleIn, dqScaleSkip);
}
half2 stats_local = {0, 0};
#pragma unroll
for (int32_t ii = 0; ii < LDGS * 4; ii++)
{
#pragma unroll
for (int32_t jj = 0; jj < 4; jj++)
{
const float tmp = hdata[ii][jj] * (rld);
stats_local = stats_local + __floats2half2_rn(tmp, tmp * hdata[ii][jj]);
}
}
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 1); __syncwarp();
if (VECS_PER_CTA == 1)
{
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 2); __syncwarp();
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4); __syncwarp();
}
else if (VECS_PER_CTA == 2)
{
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4); __syncwarp();
}
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 8); __syncwarp();
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 16); __syncwarp();
if (is_warp_lead)
{
smem_red[pos][warp] = stats_local;
}
__syncthreads();
if (is_cta_lead)
{
for (int32_t ii = 1; ii < WARPS; ii++)
{
stats_local = stats_local + smem_red[pos][ii];
}
float mu = __low2float(stats_local);
float sos = __high2float(stats_local);
float rsigma = rsqrtf(sos - mu * mu);
smem_red[pos][0] = __floats2half2_rn(mu, rsigma);
}
__syncthreads();
// load params into smem: 2x Headsx32x2x2B
const float2 statsf = __half22float2(smem_red[pos][0]);
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
#pragma unroll
for (int32_t jj = 0; jj < 4; jj++)
{
#pragma unroll
for (int32_t kk = 0; kk < 4; kk++)
{
const int32_t param_idx = (ii * ROWS_PER_LDG + row) * 32 + (jj * 4 + kk) + (tidx & 1) * 16;
const float bb = b[param_idx];
const float gg = g[param_idx];
hdata[ii * 4 + jj][kk] = gg * statsf.y * (hdata[ii * 4 + jj][kk] - statsf.x) + bb;
}
}
}
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii].x = pack4(hdata[ii * 4 + 0], qScale);
in_data[ii].y = pack4(hdata[ii * 4 + 1], qScale);
in_data[ii].z = pack4(hdata[ii * 4 + 2], qScale);
in_data[ii].w = pack4(hdata[ii * 4 + 3], qScale);
}
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
if (my_pred)
{
stg(output + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
}
}
// store
}
int32_t launch_large_hface(hipStream_t stream, const int32_t ld, const int32_t total, const int8_t* input,
const int8_t* skip, const half* beta, const half* gamma, int8_t* output, const float dqScaleIn,
const float dqScaleSkip, const float qScale)
{
if (ld == 1024)
{
constexpr int32_t WARPS = 4;
constexpr int32_t THREADS_PER_ROW = 8;
constexpr int32_t HEADS = 16;
constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
hipLaunchKernelGGL(( skipln_vec32_hface<WARPS, HEADS, THREADS_PER_ROW>), dim3(blocks), dim3(WARPS * 32), PARAM_BYTES, stream,
input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else if (ld == 768)
{
constexpr int32_t WARPS = 3;
constexpr int32_t THREADS_PER_ROW = 8;
constexpr int32_t HEADS = 12;
constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
hipLaunchKernelGGL(( skipln_vec32_hface<WARPS, HEADS, THREADS_PER_ROW>), dim3(blocks), dim3(WARPS * 32), PARAM_BYTES, stream,
input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else
{
return STATUS_FAILURE;
}
return hipPeekAtLastError();
}
// naive kernel that only changes the addressing seems to be faster for small problem sizes
template <int32_t TPB, int32_t VPT>
__global__ void skiplnDQQ_vec3(const int32_t ld, const int8_t* input, const int8_t* skip, int8_t* output,
const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale,
const int32_t total)
{
const int32_t hinner = threadIdx.x % 4;
const int32_t houter = threadIdx.x / 4;
const int32_t tidx = threadIdx.x;
const int32_t bidx = blockIdx.x;
const int32_t idx = houter * total * 32 + bidx * 32 + hinner * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
int8_t in_local[VPT];
int8_t skip_local[VPT];
half in_local_dq[VPT]; // dequantized input + skip
half beta_local[VPT];
half gamma_local[VPT];
// load input tensors
copy<sizeof(int8_t) * VPT>(&input[idx], in_local);
copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local);
// load parameters
copy<sizeof(half) * VPT>(&beta[tidx * VPT], beta_local);
copy<sizeof(half) * VPT>(&gamma[tidx * VPT], gamma_local);
half2 stats_local = __floats2half2_rn(0.f, 0.f); // accumulator
const half rld = half(1.f) / half(ld);
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
// DQ input and skip
const float tmp_in = in_local[it];
const float tmp_skip = skip_local[it];
in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip;
const half tmp = rld * in_local_dq[it];
const half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]);
stats_local = stats_local + tmp2;
}
using BlockReduce = hipcub::BlockReduce<half2, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ half mu; // mean
__shared__ half rsigma; // 1 / std.dev.
const half2 sum2 = BlockReduce(temp_storage).Reduce(stats_local, hipcub::Sum());
if (tidx == 0)
{
mu = __low2half(sum2);
rsigma = rsqrtf(__high2half(sum2) - mu * mu);
}
__syncthreads();
static_assert(VPT % 4 == 0, "");
uint32_t out_local[VPT/4];
#pragma unroll
for (int it = 0; it < VPT / 4; it++)
{
const float tmp0 = gamma_local[it*4+0] * (in_local_dq[it*4+0] - mu) * rsigma + beta_local[it*4+0];
const float tmp1 = gamma_local[it*4+1] * (in_local_dq[it*4+1] - mu) * rsigma + beta_local[it*4+1];
const float tmp2 = gamma_local[it*4+2] * (in_local_dq[it*4+2] - mu) * rsigma + beta_local[it*4+2];
const float tmp3 = gamma_local[it*4+3] * (in_local_dq[it*4+3] - mu) * rsigma + beta_local[it*4+3];
out_local[it] = float4_to_char4(tmp0 * qScale, tmp1 * qScale, tmp2 * qScale, tmp3 * qScale);
}
copy<sizeof(int8_t) * VPT>(out_local, &output[idx]);
}
int launch_small_hface(hipStream_t stream, const int32_t ld, const int32_t total, const int8_t* input,
const int8_t* skip, const half* beta, const half* gamma, int8_t* output, const float dqScaleIn,
const float dqScaleSkip, const float qScale)
{
const int32_t gridSize = total;
// we align reads with the number of parameters, i.e. 8-wide instead of 16
constexpr int32_t VPT = 16 / sizeof(half); // 8
if (ld == 768)
{
constexpr int32_t TPB = 768 / VPT;
hipLaunchKernelGGL(( skiplnDQQ_vec3<TPB, VPT>)
, dim3(gridSize), dim3(TPB), 0, stream, ld, input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else if (ld == 1024)
{
constexpr int32_t TPB = 1024 / VPT; // 128
hipLaunchKernelGGL(( skiplnDQQ_vec3<TPB, VPT>)
, dim3(gridSize), dim3(TPB), 0, stream, ld, input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else
{
std::cout << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl;
return STATUS_FAILURE;
}
return hipPeekAtLastError();
}
} // namespace bert
| 9bc765eef2949741060c1683fbccda7f2c52187a.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "common/bertCommon.h"
#include "common/common.cuh"
#include <cassert>
#include <cstring>
#include <cuda.h>
#include <type_traits>
#include <vector>
using namespace nvinfer1;
namespace bert
{
inline __device__ void res_add(
float (&hdata)[4], const uint32_t idata, const uint32_t ires, const float dqData, const float dqRes)
{
char4 ires4 = reinterpret_cast<const char4&>(ires);
char4 idata4 = reinterpret_cast<const char4&>(idata);
hdata[0] = float(idata4.x) * dqData + float(ires4.x) * dqRes;
hdata[1] = float(idata4.y) * dqData + float(ires4.y) * dqRes;
hdata[2] = float(idata4.z) * dqData + float(ires4.z) * dqRes;
hdata[3] = float(idata4.w) * dqData + float(ires4.w) * dqRes;
}
template <int32_t WARPS, int32_t HEADS, int32_t THREADS_PER_ROW>
__global__ void skipln_vec32_hface(const int8_t* input, const int8_t* skip, int8_t* output, const half* beta,
const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale, const int32_t total)
{
// clang-format off
enum { HEAD_SIZE = 64 };
enum { BYTES_PER_LDG = 16 };
enum { THREADS_PER_CTA = WARPS * 32 };
enum { ROWS_PER_LDG = THREADS_PER_CTA / THREADS_PER_ROW };
enum { VECS_PER_CTA = THREADS_PER_ROW / 2 };
enum { PARAM_BYTES = HEADS * HEAD_SIZE * 2 };
enum { PARAM_LDGS = PARAM_BYTES / (THREADS_PER_CTA * BYTES_PER_LDG) };
enum { LDGS = HEADS * 2 / ROWS_PER_LDG };
// clang-format on
static_assert(VECS_PER_CTA == 4, "");
static_assert(PARAM_LDGS == 1, "");
static_assert(ROWS_PER_LDG == HEADS , "");
static_assert(LDGS == 2, "");
static_assert(LDGS * ROWS_PER_LDG == HEADS * 2, "");
static_assert(THREADS_PER_CTA * BYTES_PER_LDG == PARAM_BYTES, "");
static_assert(PARAM_LDGS == 1, "");
extern __shared__ char smem_[];
// space for CTA-wide reduction
__shared__ half2 smem_red[VECS_PER_CTA][WARPS];
constexpr float rld = 1.f / (float(HEADS) * float(HEAD_SIZE));
const int32_t bidx = blockIdx.x;
const int32_t tidx = threadIdx.x;
const int32_t row = tidx / THREADS_PER_ROW;
const int32_t col = tidx % THREADS_PER_ROW;
const int32_t lane = tidx % 32;
const int32_t warp = tidx / 32;
const bool is_warp_lead = (lane < THREADS_PER_ROW) && ((lane & 1) == 0);
const bool is_cta_lead = (tidx < THREADS_PER_ROW) && ((tidx & 1) == 0);
// token position: every two threads load together the 32B at one token
// position
const int32_t pos = col / 2;
const int32_t pos_offset = bidx * VECS_PER_CTA + pos; // for token positions per block, disabling 2 threads per pos
const bool my_pred = pos_offset < total;
const int32_t row_stride_bytes = total * 32;
uint4 in_data[LDGS];
uint4 in_skip[LDGS];
float hdata[LDGS * 4][4];
const int32_t gmem_offset = row * row_stride_bytes + (bidx * THREADS_PER_ROW + col) * BYTES_PER_LDG;
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii] = {0, 0, 0, 0};
in_skip[ii] = {0, 0, 0, 0};
if (my_pred)
{
ldg(input + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
ldg(skip + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_skip[ii]);
}
}
uint4* smem_b = reinterpret_cast<uint4*>(&smem_[0]) + tidx;
uint4* smem_g = reinterpret_cast<uint4*>(&smem_[PARAM_BYTES]) + tidx;
const int8_t* beta_ptr = reinterpret_cast<const int8_t*>(beta) + tidx * BYTES_PER_LDG;
const int8_t* gamma_ptr = reinterpret_cast<const int8_t*>(gamma) + tidx * BYTES_PER_LDG;
ldg(beta_ptr, *smem_b);
ldg(gamma_ptr, *smem_g);
half* b = reinterpret_cast<half*>(&smem_[0]);
half* g = reinterpret_cast<half*>(&smem_[PARAM_BYTES]);
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
res_add(hdata[ii * 4 + 0], in_data[ii].x, in_skip[ii].x, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 1], in_data[ii].y, in_skip[ii].y, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 2], in_data[ii].z, in_skip[ii].z, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 3], in_data[ii].w, in_skip[ii].w, dqScaleIn, dqScaleSkip);
}
half2 stats_local = {0, 0};
#pragma unroll
for (int32_t ii = 0; ii < LDGS * 4; ii++)
{
#pragma unroll
for (int32_t jj = 0; jj < 4; jj++)
{
const float tmp = hdata[ii][jj] * (rld);
stats_local = stats_local + __floats2half2_rn(tmp, tmp * hdata[ii][jj]);
}
}
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 1); __syncwarp();
if (VECS_PER_CTA == 1)
{
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 2); __syncwarp();
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4); __syncwarp();
}
else if (VECS_PER_CTA == 2)
{
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4); __syncwarp();
}
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 8); __syncwarp();
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 16); __syncwarp();
if (is_warp_lead)
{
smem_red[pos][warp] = stats_local;
}
__syncthreads();
if (is_cta_lead)
{
for (int32_t ii = 1; ii < WARPS; ii++)
{
stats_local = stats_local + smem_red[pos][ii];
}
float mu = __low2float(stats_local);
float sos = __high2float(stats_local);
float rsigma = rsqrtf(sos - mu * mu);
smem_red[pos][0] = __floats2half2_rn(mu, rsigma);
}
__syncthreads();
// load params into smem: 2x Headsx32x2x2B
const float2 statsf = __half22float2(smem_red[pos][0]);
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
#pragma unroll
for (int32_t jj = 0; jj < 4; jj++)
{
#pragma unroll
for (int32_t kk = 0; kk < 4; kk++)
{
const int32_t param_idx = (ii * ROWS_PER_LDG + row) * 32 + (jj * 4 + kk) + (tidx & 1) * 16;
const float bb = b[param_idx];
const float gg = g[param_idx];
hdata[ii * 4 + jj][kk] = gg * statsf.y * (hdata[ii * 4 + jj][kk] - statsf.x) + bb;
}
}
}
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii].x = pack4(hdata[ii * 4 + 0], qScale);
in_data[ii].y = pack4(hdata[ii * 4 + 1], qScale);
in_data[ii].z = pack4(hdata[ii * 4 + 2], qScale);
in_data[ii].w = pack4(hdata[ii * 4 + 3], qScale);
}
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
if (my_pred)
{
stg(output + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
}
}
// store
}
int32_t launch_large_hface(cudaStream_t stream, const int32_t ld, const int32_t total, const int8_t* input,
const int8_t* skip, const half* beta, const half* gamma, int8_t* output, const float dqScaleIn,
const float dqScaleSkip, const float qScale)
{
if (ld == 1024)
{
constexpr int32_t WARPS = 4;
constexpr int32_t THREADS_PER_ROW = 8;
constexpr int32_t HEADS = 16;
constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
skipln_vec32_hface<WARPS, HEADS, THREADS_PER_ROW><<<blocks, WARPS * 32, PARAM_BYTES, stream>>>(
input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else if (ld == 768)
{
constexpr int32_t WARPS = 3;
constexpr int32_t THREADS_PER_ROW = 8;
constexpr int32_t HEADS = 12;
constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
skipln_vec32_hface<WARPS, HEADS, THREADS_PER_ROW><<<blocks, WARPS * 32, PARAM_BYTES, stream>>>(
input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else
{
return STATUS_FAILURE;
}
return cudaPeekAtLastError();
}
// naive kernel that only changes the addressing seems to be faster for small problem sizes
template <int32_t TPB, int32_t VPT>
__global__ void skiplnDQQ_vec3(const int32_t ld, const int8_t* input, const int8_t* skip, int8_t* output,
const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale,
const int32_t total)
{
const int32_t hinner = threadIdx.x % 4;
const int32_t houter = threadIdx.x / 4;
const int32_t tidx = threadIdx.x;
const int32_t bidx = blockIdx.x;
const int32_t idx = houter * total * 32 + bidx * 32 + hinner * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
int8_t in_local[VPT];
int8_t skip_local[VPT];
half in_local_dq[VPT]; // dequantized input + skip
half beta_local[VPT];
half gamma_local[VPT];
// load input tensors
copy<sizeof(int8_t) * VPT>(&input[idx], in_local);
copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local);
// load parameters
copy<sizeof(half) * VPT>(&beta[tidx * VPT], beta_local);
copy<sizeof(half) * VPT>(&gamma[tidx * VPT], gamma_local);
half2 stats_local = __floats2half2_rn(0.f, 0.f); // accumulator
const half rld = half(1.f) / half(ld);
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
// DQ input and skip
const float tmp_in = in_local[it];
const float tmp_skip = skip_local[it];
in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip;
const half tmp = rld * in_local_dq[it];
const half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]);
stats_local = stats_local + tmp2;
}
using BlockReduce = cub::BlockReduce<half2, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ half mu; // mean
__shared__ half rsigma; // 1 / std.dev.
const half2 sum2 = BlockReduce(temp_storage).Reduce(stats_local, cub::Sum());
if (tidx == 0)
{
mu = __low2half(sum2);
rsigma = rsqrtf(__high2half(sum2) - mu * mu);
}
__syncthreads();
static_assert(VPT % 4 == 0, "");
uint32_t out_local[VPT/4];
#pragma unroll
for (int it = 0; it < VPT / 4; it++)
{
const float tmp0 = gamma_local[it*4+0] * (in_local_dq[it*4+0] - mu) * rsigma + beta_local[it*4+0];
const float tmp1 = gamma_local[it*4+1] * (in_local_dq[it*4+1] - mu) * rsigma + beta_local[it*4+1];
const float tmp2 = gamma_local[it*4+2] * (in_local_dq[it*4+2] - mu) * rsigma + beta_local[it*4+2];
const float tmp3 = gamma_local[it*4+3] * (in_local_dq[it*4+3] - mu) * rsigma + beta_local[it*4+3];
out_local[it] = float4_to_char4(tmp0 * qScale, tmp1 * qScale, tmp2 * qScale, tmp3 * qScale);
}
copy<sizeof(int8_t) * VPT>(out_local, &output[idx]);
}
int launch_small_hface(cudaStream_t stream, const int32_t ld, const int32_t total, const int8_t* input,
const int8_t* skip, const half* beta, const half* gamma, int8_t* output, const float dqScaleIn,
const float dqScaleSkip, const float qScale)
{
const int32_t gridSize = total;
// we align reads with the number of parameters, i.e. 8-wide instead of 16
constexpr int32_t VPT = 16 / sizeof(half); // 8
if (ld == 768)
{
constexpr int32_t TPB = 768 / VPT;
skiplnDQQ_vec3<TPB, VPT>
<<<gridSize, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else if (ld == 1024)
{
constexpr int32_t TPB = 1024 / VPT; // 128
skiplnDQQ_vec3<TPB, VPT>
<<<gridSize, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total);
}
else
{
std::cout << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl;
return STATUS_FAILURE;
}
return cudaPeekAtLastError();
}
} // namespace bert
|
e0c70de59ec3e95ebd18fb31793c9b84c7770d45.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zgecsr5mv.cu, normal z -> d, Thu Oct 8 23:05:48 2020
@author Weifeng Liu
*/
// CSR5 SpMV kernel
// see paper by W. Liu and B. Vinter. (2015).
// "CSR5: An Efficient Storage Format for Cross-Platform
// Sparse Matrix-Vector Multiplication".
// 29th ACM International Conference on Supercomputing (ICS15). pp. 339-350.
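//
// Layout note (as consumed by the kernels below): the nonzeros are grouped into
// tiles of MAGMA_CSR5_OMEGA (= 32) lanes x sigma elements, interleaved so that
// lane l of a warp reads elements l, l + OMEGA, l + 2*OMEGA, ... of its tile.
// d_tile_ptr stores the starting row of each tile, with the MSB marking tiles
// that contain empty rows; d_tile_desc packs per-lane y_offset, scansum_offset
// and bit-flag fields whose widths are bit_y_offset and bit_scansum_offset;
// d_calibrator collects partial sums of rows that cross tile boundaries.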
#include "magmasparse_internal.h"
#include "atomicopsdouble.h"
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#define MAGMA_CSR5_THREAD_GROUP 128
#define MAGMA_CSR5_THREAD_BUNCH 32
#if (defined( TORCH_HIP_VERSION ) && ( TORCH_HIP_VERSION >= 8000 ))
__inline__ __device__ void
sum_32(
double *s_sum,
const int local_id)
{
if (local_id < 16) s_sum[local_id] += s_sum[local_id + 16];
if (local_id < 8) s_sum[local_id] += s_sum[local_id + 8];
if (local_id < 4) s_sum[local_id] += s_sum[local_id + 4];
if (local_id < 2) s_sum[local_id] += s_sum[local_id + 2];
if (local_id < 1) s_sum[local_id] += s_sum[local_id + 1];
}
__inline__ __device__ void
scan_32(
double *s_scan,
const int local_id)
{
int ai, bi;
const int baseai = 2 * local_id + 1;
const int basebi = baseai + 1;
double temp;
if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id == 0) { s_scan[31] = s_scan[15]; s_scan[15] = MAGMA_D_ZERO; }
if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
}
__inline__ __device__ double
candidate(
double *d_value_tile,
double *d_x,
const magma_index_t *d_column_index_tile,
const magma_index_t candidate_index,
const double alpha)
{
double x = MAGMA_D_ZERO;
#if __CUDA_ARCH__ >= 350
x = __ldg(&d_x[d_column_index_tile[candidate_index]]);
#else
x = d_x[d_column_index_tile[candidate_index]];
#endif
return d_value_tile[candidate_index] * x * alpha;
}
//template<typename vT>
//__forceinline__ __device__
//vT segmented_sum_shfl(vT tmp_sum,
// const int scansum_offset,
// const int lane_id)
//{
// vT sum = __shfl_down(tmp_sum, 1);
// sum = lane_id == MAGMA_CSR5_OMEGA - 1 ? 0 : sum;
// // inclusive scan
// vT scan_sum = scan_32_shfl(sum); //scan_32_shfl<vT>(sum, lane_id);
// tmp_sum = __shfl_down(scan_sum, scansum_offset);
// tmp_sum = tmp_sum - scan_sum + sum;
//
// return tmp_sum;
//}
__forceinline__ __device__ double
segmented_sum(
double tmp_sum,
double *s_sum,
const magma_index_t scansum_offset,
const magma_index_t lane_id)
{
if (lane_id)
s_sum[lane_id - 1] = tmp_sum;
s_sum[lane_id] = lane_id == MAGMA_CSR5_OMEGA - 1
? MAGMA_D_ZERO : s_sum[lane_id];
double sum = tmp_sum = s_sum[lane_id];
scan_32(s_sum, lane_id); // exclusive scan
s_sum[lane_id] += tmp_sum; // inclusive scan (exclusive scan+original val)
tmp_sum = s_sum[lane_id + scansum_offset];
tmp_sum = tmp_sum - s_sum[lane_id] + sum;
return tmp_sum;
}
template<int c_sigma>
__inline__ __device__ void
tile_fast_track(
double *d_value_tile,
double *d_x,
const magma_index_t *d_column_index_tile,
double *d_calibrator,
//#if __CUDA_ARCH__ < 300
double *s_sum,
//#endif
const int lane_id,
const magma_index_t par_id,
const double alpha)
{
double sum = MAGMA_D_ZERO;
#pragma unroll
for (int i = 0; i < c_sigma; i++)
{
sum += candidate(d_value_tile, d_x, d_column_index_tile,
i * MAGMA_CSR5_OMEGA + lane_id, alpha);
}
//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
// sum = sum_32_shfl<vT>(sum);
// if (!lane_id)
// d_calibrator[par_id] = sum;
//#else // use smem
s_sum[lane_id] = sum;
sum_32(s_sum, lane_id);
if (!lane_id)
{
d_calibrator[par_id] = s_sum[0];
}
//#endif
}
template<int c_sigma>
__inline__ __device__ void
tile_normal_track(
const magma_index_t *d_column_index_tile,
double *d_value_tile,
double *d_x,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
double *d_calibrator,
double *d_y,
//#if __CUDA_ARCH__ < 300
double *s_sum,
volatile int *s_scan,
//#endif
const magma_index_t par_id,
const int lane_id,
const int bit_y_offset,
const int bit_scansum_offset,
const bool empty_rows,
const double alpha)
{
int start = 0;
int stop = 0;
bool local_bit;
double sum = MAGMA_D_ZERO;
magma_index_t offset_pointer = empty_rows ?
d_tile_desc_offset_ptr[par_id] : 0;
magma_uindex_t descriptor = d_tile_desc[lane_id];
magma_index_t y_offset = descriptor >> (32 - bit_y_offset);
const int scansum_offset = (descriptor << bit_y_offset)
>> (32 - bit_scansum_offset);
const int bit_bitflag = 32 - bit_y_offset - bit_scansum_offset;
bool direct = false;
double first_sum, last_sum;
// step 1. thread-level seg sum
int ly = 0;
// extract the first bit-flag packet
descriptor = descriptor << (bit_y_offset + bit_scansum_offset);
descriptor = lane_id ? descriptor : descriptor | 0x80000000;
local_bit = (descriptor >> 31) & 0x1;
start = !local_bit;
direct = local_bit & (bool)lane_id;
sum = candidate(d_value_tile, d_x,
d_column_index_tile, lane_id, alpha);
#pragma unroll
for (int i = 1; i < c_sigma; i++)
{
int norm_i = i - bit_bitflag;
if (!(ly || norm_i) || (ly && !(31 & norm_i)))
{
ly++;
descriptor = d_tile_desc[ly * MAGMA_CSR5_OMEGA + lane_id];
}
norm_i = !ly ? 31 & i : 31 & norm_i;
norm_i = 31 - norm_i;
local_bit = (descriptor >> norm_i) & 0x1;
if (local_bit)
{
if (direct)
d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
: y_offset] += sum;
else
first_sum = sum;
}
y_offset += local_bit & direct;
direct |= local_bit;
sum = local_bit ? MAGMA_D_ZERO : sum;
stop += local_bit;
sum += candidate(d_value_tile, d_x, d_column_index_tile,
i * MAGMA_CSR5_OMEGA + lane_id, alpha);
}
first_sum = direct ? first_sum : sum;
last_sum = sum;
// step 2. segmented sum
sum = start ? first_sum : MAGMA_D_ZERO;
//#if __CUDA_ARCH__ >= 300
// sum = segmented_sum_shfl<vT>(sum, scansum_offset, lane_id);
//#else
sum = segmented_sum(sum, s_sum, scansum_offset, lane_id);
//#endif
// step 3-1. add s_sum to position stop
last_sum += (start <= stop) ? sum : MAGMA_D_ZERO;
// step 3-2. write sums to result array
if (direct)
d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
: y_offset] += last_sum;
// the first/last value of the first thread goes to calibration
if (!lane_id)
d_calibrator[par_id] = direct ? first_sum : last_sum;
}
template<int c_sigma>
__inline__ __device__ void
spmv_tile(
const magma_index_t *d_column_index_tile,
double *d_value_tile,
const magma_index_t *d_row_pointer,
double *d_x,
const magma_uindex_t *d_tile_ptr,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
double *d_calibrator,
double *d_y,
const magma_index_t par_id,
const int lane_id,
const int bunch_id,
const int bit_y_offset,
const int bit_scansum_offset,
const double alpha)
{
//#if __CUDA_ARCH__ < 300
__shared__ double
s_sum[MAGMA_CSR5_THREAD_GROUP];
volatile __shared__ int
s_scan[(MAGMA_CSR5_OMEGA + 1) *
(MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA)];
//#endif
magma_uindex_t row_start, row_stop;
//#if __CUDA_ARCH__ >= 350
// if (lane_id < 2)
// row_start = __ldg(&d_tile_ptr[par_id + lane_id]);
// row_stop = __shfl(row_start, 1);
// row_start = __shfl(row_start, 0);
// row_stop &= 0x7FFFFFFF;
//#else
volatile __shared__ magma_uindex_t
s_row_start_stop[MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1];
if (threadIdx.x < MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1)
{
s_row_start_stop[threadIdx.x] = d_tile_ptr[par_id + threadIdx.x];
}
__syncthreads();
row_start = s_row_start_stop[bunch_id];
row_stop = s_row_start_stop[bunch_id + 1] & 0x7FFFFFFF;
//#endif
if (row_start == row_stop) // fast track through reduction
{
tile_fast_track<c_sigma>
(d_value_tile, d_x, d_column_index_tile, d_calibrator,
//#if __CUDA_ARCH__ < 300
&s_sum[bunch_id * MAGMA_CSR5_OMEGA],
//#endif
lane_id, par_id, alpha);
}
else
{
const bool empty_rows = (row_start >> 31) & 0x1;
row_start &= 0x7FFFFFFF;
d_y = &d_y[row_start+1];
tile_normal_track<c_sigma>
(d_column_index_tile, d_value_tile, d_x,
d_tile_desc, d_tile_desc_offset_ptr,
d_tile_desc_offset, d_calibrator, d_y,
//#if __CUDA_ARCH__ < 300
&s_sum[bunch_id * MAGMA_CSR5_OMEGA],
&s_scan[bunch_id * (MAGMA_CSR5_OMEGA + 1)],
//#endif
par_id, lane_id,
bit_y_offset, bit_scansum_offset, empty_rows, alpha);
}
}
template<int c_sigma>
__global__ void
spmv_csr5_compute_kernel(
const magma_index_t *d_column_index,
double *d_value,
const magma_index_t *d_row_pointer,
double *d_x,
const magma_uindex_t *d_tile_ptr,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
double *d_calibrator,
double *d_y,
const magma_index_t p,
const int num_packet,
const int bit_y_offset,
const int bit_scansum_offset,
const double alpha)
{
// warp lane id
const int lane_id = 31 & threadIdx.x; //threadIdx.x % CSR5_OMEGA;
// warp global id == par_id
const magma_index_t par_id = (blockIdx.x * blockDim.x + threadIdx.x)
/ MAGMA_CSR5_OMEGA;
const int bunch_id = threadIdx.x / MAGMA_CSR5_OMEGA;
if (par_id >= p - 1)
return;
spmv_tile<c_sigma>
(&d_column_index[par_id * MAGMA_CSR5_OMEGA * c_sigma],
&d_value[par_id * MAGMA_CSR5_OMEGA * c_sigma],
d_row_pointer, d_x, d_tile_ptr,
&d_tile_desc[par_id * MAGMA_CSR5_OMEGA * num_packet],
d_tile_desc_offset_ptr, d_tile_desc_offset,
d_calibrator, d_y,
par_id, lane_id, bunch_id, bit_y_offset, bit_scansum_offset, alpha);
}
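// Calibration pass: each entry of d_calibrator holds the partial sum that the
// corresponding tile could not commit because its first/last row continues in a
// neighbouring tile; this kernel accumulates those leftovers into d_y, using
// atomics where several blocks may target the same row.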
__global__ void
spmv_csr5_calibrate_kernel(
const magma_uindex_t *d_tile_ptr,
const double *d_calibrator,
double *d_y,
const magma_index_t p)
{
//const int lane_id = threadIdx.x % MAGMA_CSR5_THREAD_BUNCH;
//const int bunch_id = threadIdx.x / MAGMA_CSR5_THREAD_BUNCH;
const int local_id = threadIdx.x;
const magma_index_t global_id = blockIdx.x * blockDim.x + threadIdx.x;
double sum;
volatile __shared__
magma_index_t s_tile_ptr[MAGMA_CSR5_THREAD_GROUP+1];
__shared__ double s_calibrator[MAGMA_CSR5_THREAD_GROUP];
//volatile __shared__
// double s_sum[MAGMA_CSR5_THREAD_GROUP
// / MAGMA_CSR5_THREAD_BUNCH];
s_tile_ptr[local_id] = global_id < p-1 ?
(magma_index_t)(d_tile_ptr[global_id] & 0x7FFFFFFF) : -1;
s_calibrator[local_id] = sum = global_id < p-1 ?
d_calibrator[global_id] : MAGMA_D_ZERO;
__syncthreads();
// do a fast track if all s_tile_ptr are the same
if (s_tile_ptr[0] == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP - 1])
{
//sum = sum_32_shfl<vT>(sum);
//if (!lane_id)
// s_sum[bunch_id] = sum;
//__syncthreads();
//if (!bunch_id)
//{
// sum = lane_id < (MAGMA_CSR5_THREAD_GROUP
// / MAGMA_CSR5_THREAD_BUNCH) ? s_sum[lane_id] : 0;
// sum = sum_32_shfl<vT>(sum);
//}
if (local_id < 64) s_calibrator[local_id] += s_calibrator[local_id+64];
__syncthreads();
if (local_id < 32) s_calibrator[local_id] += s_calibrator[local_id+32];
if (local_id < 16) s_calibrator[local_id] += s_calibrator[local_id+16];
if (local_id < 8) s_calibrator[local_id] += s_calibrator[local_id+8];
if (local_id < 4) s_calibrator[local_id] += s_calibrator[local_id+4];
if (local_id < 2) s_calibrator[local_id] += s_calibrator[local_id+2];
if (local_id < 1) s_calibrator[local_id] += s_calibrator[local_id+1];
if (!local_id)
{
atomicAdddouble(&d_y[s_tile_ptr[0]], s_calibrator[0]);
}
return;
}
int local_par_id = local_id;
magma_index_t row_start_current, row_start_target, row_start_previous;
sum = MAGMA_D_ZERO;
// use (p - 1), due to the tail tile is dealt with CSR-vector method
if (global_id < p - 1)
{
row_start_previous = local_id ? s_tile_ptr[local_id-1] : -1;
row_start_current = s_tile_ptr[local_id];
if (row_start_previous != row_start_current)
{
row_start_target = row_start_current;
while (row_start_target == row_start_current
&& local_par_id < blockDim.x)
{
sum += s_calibrator[local_par_id];
local_par_id++;
row_start_current = s_tile_ptr[local_par_id];
}
if (row_start_target == s_tile_ptr[0]
|| row_start_target == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP-1])
{
atomicAdddouble(&d_y[row_start_target], sum);
}
else
d_y[row_start_target] += sum;
}
}
}
__global__ void
spmv_csr5_tail_tile_kernel(
const magma_index_t *d_row_pointer,
const magma_index_t *d_column_index,
double *d_value,
double *d_x,
double *d_y,
const magma_index_t tail_tile_start,
const magma_index_t p,
const int sigma,
const double alpha)
{
const int local_id = threadIdx.x;
const magma_index_t row_id = tail_tile_start + blockIdx.x;
const magma_index_t row_start = !blockIdx.x ? (p - 1)
* MAGMA_CSR5_OMEGA * sigma
: d_row_pointer[row_id];
const magma_index_t row_stop = d_row_pointer[row_id + 1];
double sum = MAGMA_D_ZERO;
for (magma_index_t idx = local_id + row_start;
idx < row_stop; idx += MAGMA_CSR5_OMEGA)
{
sum += candidate(d_value, d_x, d_column_index, idx, alpha);
}
//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
// sum = sum_32_shfl<vT>(sum);
//#else
__shared__ double s_sum[MAGMA_CSR5_OMEGA];
s_sum[local_id] = sum;
sum_32(s_sum, local_id);
//#endif
if (!local_id)
d_y[row_id] += s_sum[0]; //= !blockIdx.x ? d_y[row_id] + sum : sum;
}
__global__ void
dgecsr5mv_kernel_update_y(int num_rows,
double beta,
double * dy)
{
const magma_index_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < num_rows)
{
if (beta == MAGMA_D_ZERO)
dy[row] = MAGMA_D_ZERO;
else
dy[row] *= beta;
}
}
#endif
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR5 (val (tile-wise column-major),
row_pointer,
col (tile-wise column-major),
tile_pointer,
tile_desc).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
p magma_int_t
number of tiles in A
@param[in]
alpha double
scalar multiplier
@param[in]
sigma magma_int_t
sigma in A in CSR5
@param[in]
bit_y_offset magma_int_t
bit_y_offset in A in CSR5
@param[in]
bit_scansum_offset magma_int_t
bit_scansum_offset in A in CSR5
@param[in]
num_packet magma_int_t
num_packet in A in CSR5
@param[in]
dtile_ptr magmaUIndex_ptr
tile pointer of A in CSR5
@param[in]
dtile_desc magmaUIndex_ptr
tile descriptor of A in CSR5
@param[in]
dtile_desc_offset_ptr magmaIndex_ptr
tile descriptor offset pointer of A in CSR5
@param[in]
dtile_desc_offset magmaIndex_ptr
tile descriptor offset of A in CSR5
@param[in]
dcalibrator magmaDouble_ptr
calibrator of A in CSR5
@param[in]
tail_tile_start magma_int_t
start of the last tile in A
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
row pointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
column indices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
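/*
    Illustrative usage sketch (not part of the library). The variable names
    below are placeholders; the CSR5 meta-data (tile pointer, tile descriptors,
    calibrator, sigma, bit widths, ...) is assumed to have been produced
    beforehand by a CSR-to-CSR5 conversion step.

        magma_queue_t queue;
        magma_queue_create( 0, &queue );

        // y = alpha * A * x + beta * y, with A given both in CSR
        // (dval/drowptr/dcolind) and through the CSR5 tiling meta-data
        // documented above.
        magma_dgecsr5mv( MagmaNoTrans, m, n, p,
                         alpha, sigma,
                         bit_y_offset, bit_scansum_offset, num_packet,
                         dtile_ptr, dtile_desc,
                         dtile_desc_offset_ptr, dtile_desc_offset,
                         dcalibrator, tail_tile_start,
                         dval, drowptr, dcolind,
                         dx, beta, dy, queue );

        magma_queue_destroy( queue );
*/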
extern "C" magma_int_t
magma_dgecsr5mv(
magma_trans_t transA,
magma_int_t m,
magma_int_t n,
magma_int_t p,
double alpha,
magma_int_t sigma,
magma_int_t bit_y_offset,
magma_int_t bit_scansum_offset,
magma_int_t num_packet,
magmaUIndex_ptr dtile_ptr,
magmaUIndex_ptr dtile_desc,
magmaIndex_ptr dtile_desc_offset_ptr,
magmaIndex_ptr dtile_desc_offset,
magmaDouble_ptr dcalibrator,
magma_int_t tail_tile_start,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
int info = MAGMA_ERR_NOT_SUPPORTED;
#if (defined( TORCH_HIP_VERSION ) && ( TORCH_HIP_VERSION >= 8000 ))
magma_int_t arch = magma_getdevice_arch();
if ( arch >= 600 ) {
//dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
//magma_int_t threads = BLOCK_SIZE;
//dgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
// (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
// phase 1. update y: y = beta * y
magma_int_t num_threads = MAGMA_CSR5_THREAD_GROUP;
magma_int_t num_blocks = magma_ceildiv( m, num_threads );
//ceil ((double)m / (double)num_threads);
hipLaunchKernelGGL(( dgecsr5mv_kernel_update_y)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , m, beta, dy);
// phase 2. spmv: y += alpha * A * x
num_threads = MAGMA_CSR5_THREAD_GROUP;
num_blocks = magma_ceildiv( p-1, num_threads / MAGMA_CSR5_OMEGA );
// ceil ((double)(p-1) / (double)(num_threads / MAGMA_CSR5_OMEGA));
switch (sigma)
{
case 4:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<4>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 5:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<5>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 6:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<6>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 7:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<7>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 8:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<8>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 9:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<9>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 10:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<10>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 11:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<11>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 12:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<12>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 13:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<13>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 14:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<14>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 15:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<15>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 16:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<16>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 17:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<17>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 18:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<18>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 19:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<19>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 20:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<20>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 21:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<21>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 22:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<22>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 23:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<23>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 24:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<24>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 25:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<25>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 26:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<26>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 27:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<27>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 28:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<28>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 29:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<29>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 30:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<30>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 31:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<31>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 32:
hipLaunchKernelGGL(( spmv_csr5_compute_kernel<32>)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
}
num_threads = MAGMA_CSR5_THREAD_GROUP;
num_blocks = ceil((double)(p-1)/(double)num_threads);
hipLaunchKernelGGL(( spmv_csr5_calibrate_kernel)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
dtile_ptr, dcalibrator, dy, p);
num_threads = MAGMA_CSR5_OMEGA;
num_blocks = m - tail_tile_start;
hipLaunchKernelGGL(( spmv_csr5_tail_tile_kernel)
, dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
drowptr, dcolind, dval, dx, dy,
tail_tile_start, p, sigma, alpha);
info = MAGMA_SUCCESS;
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return info;
}
| e0c70de59ec3e95ebd18fb31793c9b84c7770d45.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zgecsr5mv.cu, normal z -> d, Thu Oct 8 23:05:48 2020
@author Weifeng Liu
*/
// CSR5 SpMV kernel
// see paper by W. Liu and B. Vinter. (2015).
// "CSR5: An Efficient Storage Format for Cross-Platform
// Sparse Matrix-Vector Multiplication".
// 29th ACM International Conference on Supercomputing (ICS15). pp. 339-350.
#include "magmasparse_internal.h"
#include "atomicopsdouble.h"
#include <cuda.h> // for CUDA_VERSION
#define MAGMA_CSR5_THREAD_GROUP 128
#define MAGMA_CSR5_THREAD_BUNCH 32
#if (defined( CUDA_VERSION ) && ( CUDA_VERSION >= 8000 ))
__inline__ __device__ void
sum_32(
double *s_sum,
const int local_id)
{
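    // in-warp tree reduction over 32 shared-memory entries; the total is left in s_sum[0]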
if (local_id < 16) s_sum[local_id] += s_sum[local_id + 16];
if (local_id < 8) s_sum[local_id] += s_sum[local_id + 8];
if (local_id < 4) s_sum[local_id] += s_sum[local_id + 4];
if (local_id < 2) s_sum[local_id] += s_sum[local_id + 2];
if (local_id < 1) s_sum[local_id] += s_sum[local_id + 1];
}
__inline__ __device__ void
scan_32(
double *s_scan,
const int local_id)
{
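    // work-efficient (Blelloch-style) exclusive prefix sum over 32 shared-memory entries:
    // the up-sweep builds partial sums, the down-sweep distributes them back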
int ai, bi;
const int baseai = 2 * local_id + 1;
const int basebi = baseai + 1;
double temp;
if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id == 0) { s_scan[31] = s_scan[15]; s_scan[15] = MAGMA_D_ZERO; }
if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
}
__inline__ __device__ double
candidate(
double *d_value_tile,
double *d_x,
const magma_index_t *d_column_index_tile,
const magma_index_t candidate_index,
const double alpha)
{
double x = MAGMA_D_ZERO;
#if __CUDA_ARCH__ >= 350
x = __ldg(&d_x[d_column_index_tile[candidate_index]]);
#else
x = d_x[d_column_index_tile[candidate_index]];
#endif
return d_value_tile[candidate_index] * x * alpha;
}
//template<typename vT>
//__forceinline__ __device__
//vT segmented_sum_shfl(vT tmp_sum,
// const int scansum_offset,
// const int lane_id)
//{
// vT sum = __shfl_down(tmp_sum, 1);
// sum = lane_id == MAGMA_CSR5_OMEGA - 1 ? 0 : sum;
// // inclusive scan
// vT scan_sum = scan_32_shfl(sum); //scan_32_shfl<vT>(sum, lane_id);
// tmp_sum = __shfl_down(scan_sum, scansum_offset);
// tmp_sum = tmp_sum - scan_sum + sum;
//
// return tmp_sum;
//}
__forceinline__ __device__ double
segmented_sum(
double tmp_sum,
double *s_sum,
const magma_index_t scansum_offset,
const magma_index_t lane_id)
{
if (lane_id)
s_sum[lane_id - 1] = tmp_sum;
s_sum[lane_id] = lane_id == MAGMA_CSR5_OMEGA - 1
? MAGMA_D_ZERO : s_sum[lane_id];
double sum = tmp_sum = s_sum[lane_id];
scan_32(s_sum, lane_id); // exclusive scan
s_sum[lane_id] += tmp_sum; // inclusive scan (exclusive scan+original val)
tmp_sum = s_sum[lane_id + scansum_offset];
tmp_sum = tmp_sum - s_sum[lane_id] + sum;
return tmp_sum;
}
template<int c_sigma>
__inline__ __device__ void
tile_fast_track(
double *d_value_tile,
double *d_x,
const magma_index_t *d_column_index_tile,
double *d_calibrator,
//#if __CUDA_ARCH__ < 300
double *s_sum,
//#endif
const int lane_id,
const magma_index_t par_id,
const double alpha)
{
double sum = MAGMA_D_ZERO;
#pragma unroll
for (int i = 0; i < c_sigma; i++)
{
sum += candidate(d_value_tile, d_x, d_column_index_tile,
i * MAGMA_CSR5_OMEGA + lane_id, alpha);
}
//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
// sum = sum_32_shfl<vT>(sum);
// if (!lane_id)
// d_calibrator[par_id] = sum;
//#else // use smem
s_sum[lane_id] = sum;
sum_32(s_sum, lane_id);
if (!lane_id)
{
d_calibrator[par_id] = s_sum[0];
}
//#endif
}
template<int c_sigma>
__inline__ __device__ void
tile_normal_track(
const magma_index_t *d_column_index_tile,
double *d_value_tile,
double *d_x,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
double *d_calibrator,
double *d_y,
//#if __CUDA_ARCH__ < 300
double *s_sum,
volatile int *s_scan,
//#endif
const magma_index_t par_id,
const int lane_id,
const int bit_y_offset,
const int bit_scansum_offset,
const bool empty_rows,
const double alpha)
{
int start = 0;
int stop = 0;
bool local_bit;
double sum = MAGMA_D_ZERO;
magma_index_t offset_pointer = empty_rows ?
d_tile_desc_offset_ptr[par_id] : 0;
magma_uindex_t descriptor = d_tile_desc[lane_id];
magma_index_t y_offset = descriptor >> (32 - bit_y_offset);
const int scansum_offset = (descriptor << bit_y_offset)
>> (32 - bit_scansum_offset);
const int bit_bitflag = 32 - bit_y_offset - bit_scansum_offset;
bool direct = false;
double first_sum, last_sum;
// step 1. thread-level seg sum
int ly = 0;
// extract the first bit-flag packet
descriptor = descriptor << (bit_y_offset + bit_scansum_offset);
descriptor = lane_id ? descriptor : descriptor | 0x80000000;
local_bit = (descriptor >> 31) & 0x1;
start = !local_bit;
direct = local_bit & (bool)lane_id;
sum = candidate(d_value_tile, d_x,
d_column_index_tile, lane_id, alpha);
#pragma unroll
for (int i = 1; i < c_sigma; i++)
{
int norm_i = i - bit_bitflag;
if (!(ly || norm_i) || (ly && !(31 & norm_i)))
{
ly++;
descriptor = d_tile_desc[ly * MAGMA_CSR5_OMEGA + lane_id];
}
norm_i = !ly ? 31 & i : 31 & norm_i;
norm_i = 31 - norm_i;
local_bit = (descriptor >> norm_i) & 0x1;
if (local_bit)
{
if (direct)
d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
: y_offset] += sum;
else
first_sum = sum;
}
y_offset += local_bit & direct;
direct |= local_bit;
sum = local_bit ? MAGMA_D_ZERO : sum;
stop += local_bit;
sum += candidate(d_value_tile, d_x, d_column_index_tile,
i * MAGMA_CSR5_OMEGA + lane_id, alpha);
}
first_sum = direct ? first_sum : sum;
last_sum = sum;
// step 2. segmented sum
sum = start ? first_sum : MAGMA_D_ZERO;
//#if __CUDA_ARCH__ >= 300
// sum = segmented_sum_shfl<vT>(sum, scansum_offset, lane_id);
//#else
sum = segmented_sum(sum, s_sum, scansum_offset, lane_id);
//#endif
// step 3-1. add s_sum to position stop
last_sum += (start <= stop) ? sum : MAGMA_D_ZERO;
// step 3-2. write sums to result array
if (direct)
d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
: y_offset] += last_sum;
// the first/last value of the first thread goes to calibration
if (!lane_id)
d_calibrator[par_id] = direct ? first_sum : last_sum;
}
template<int c_sigma>
__inline__ __device__ void
spmv_tile(
const magma_index_t *d_column_index_tile,
double *d_value_tile,
const magma_index_t *d_row_pointer,
double *d_x,
const magma_uindex_t *d_tile_ptr,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
double *d_calibrator,
double *d_y,
const magma_index_t par_id,
const int lane_id,
const int bunch_id,
const int bit_y_offset,
const int bit_scansum_offset,
const double alpha)
{
//#if __CUDA_ARCH__ < 300
__shared__ double
s_sum[MAGMA_CSR5_THREAD_GROUP];
volatile __shared__ int
s_scan[(MAGMA_CSR5_OMEGA + 1) *
(MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA)];
//#endif
magma_uindex_t row_start, row_stop;
//#if __CUDA_ARCH__ >= 350
// if (lane_id < 2)
// row_start = __ldg(&d_tile_ptr[par_id + lane_id]);
// row_stop = __shfl(row_start, 1);
// row_start = __shfl(row_start, 0);
// row_stop &= 0x7FFFFFFF;
//#else
volatile __shared__ magma_uindex_t
s_row_start_stop[MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1];
if (threadIdx.x < MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1)
{
s_row_start_stop[threadIdx.x] = d_tile_ptr[par_id + threadIdx.x];
}
__syncthreads();
row_start = s_row_start_stop[bunch_id];
row_stop = s_row_start_stop[bunch_id + 1] & 0x7FFFFFFF;
//#endif
if (row_start == row_stop) // fast track through reduction
{
tile_fast_track<c_sigma>
(d_value_tile, d_x, d_column_index_tile, d_calibrator,
//#if __CUDA_ARCH__ < 300
&s_sum[bunch_id * MAGMA_CSR5_OMEGA],
//#endif
lane_id, par_id, alpha);
}
else
{
const bool empty_rows = (row_start >> 31) & 0x1;
row_start &= 0x7FFFFFFF;
d_y = &d_y[row_start+1];
tile_normal_track<c_sigma>
(d_column_index_tile, d_value_tile, d_x,
d_tile_desc, d_tile_desc_offset_ptr,
d_tile_desc_offset, d_calibrator, d_y,
//#if __CUDA_ARCH__ < 300
&s_sum[bunch_id * MAGMA_CSR5_OMEGA],
&s_scan[bunch_id * (MAGMA_CSR5_OMEGA + 1)],
//#endif
par_id, lane_id,
bit_y_offset, bit_scansum_offset, empty_rows, alpha);
}
}
template<int c_sigma>
__global__ void
spmv_csr5_compute_kernel(
const magma_index_t *d_column_index,
double *d_value,
const magma_index_t *d_row_pointer,
double *d_x,
const magma_uindex_t *d_tile_ptr,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
double *d_calibrator,
double *d_y,
const magma_index_t p,
const int num_packet,
const int bit_y_offset,
const int bit_scansum_offset,
const double alpha)
{
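    // each MAGMA_CSR5_OMEGA-wide thread group (one warp) processes one CSR5 tile
    // holding MAGMA_CSR5_OMEGA x c_sigma nonzeros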
// warp lane id
const int lane_id = 31 & threadIdx.x; //threadIdx.x % CSR5_OMEGA;
// warp global id == par_id
const magma_index_t par_id = (blockIdx.x * blockDim.x + threadIdx.x)
/ MAGMA_CSR5_OMEGA;
const int bunch_id = threadIdx.x / MAGMA_CSR5_OMEGA;
if (par_id >= p - 1)
return;
spmv_tile<c_sigma>
(&d_column_index[par_id * MAGMA_CSR5_OMEGA * c_sigma],
&d_value[par_id * MAGMA_CSR5_OMEGA * c_sigma],
d_row_pointer, d_x, d_tile_ptr,
&d_tile_desc[par_id * MAGMA_CSR5_OMEGA * num_packet],
d_tile_desc_offset_ptr, d_tile_desc_offset,
d_calibrator, d_y,
par_id, lane_id, bunch_id, bit_y_offset, bit_scansum_offset, alpha);
}
__global__ void
spmv_csr5_calibrate_kernel(
const magma_uindex_t *d_tile_ptr,
const double *d_calibrator,
double *d_y,
const magma_index_t p)
{
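    // scatter-adds the per-tile calibrator values (partial sums of rows that span
    // tile boundaries) into the matching entries of y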
//const int lane_id = threadIdx.x % MAGMA_CSR5_THREAD_BUNCH;
//const int bunch_id = threadIdx.x / MAGMA_CSR5_THREAD_BUNCH;
const int local_id = threadIdx.x;
const magma_index_t global_id = blockIdx.x * blockDim.x + threadIdx.x;
double sum;
volatile __shared__
magma_index_t s_tile_ptr[MAGMA_CSR5_THREAD_GROUP+1];
__shared__ double s_calibrator[MAGMA_CSR5_THREAD_GROUP];
//volatile __shared__
// double s_sum[MAGMA_CSR5_THREAD_GROUP
// / MAGMA_CSR5_THREAD_BUNCH];
s_tile_ptr[local_id] = global_id < p-1 ?
(magma_index_t)(d_tile_ptr[global_id] & 0x7FFFFFFF) : -1;
s_calibrator[local_id] = sum = global_id < p-1 ?
d_calibrator[global_id] : MAGMA_D_ZERO;
__syncthreads();
// do a fast track if all s_tile_ptr are the same
if (s_tile_ptr[0] == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP - 1])
{
//sum = sum_32_shfl<vT>(sum);
//if (!lane_id)
// s_sum[bunch_id] = sum;
//__syncthreads();
//if (!bunch_id)
//{
// sum = lane_id < (MAGMA_CSR5_THREAD_GROUP
// / MAGMA_CSR5_THREAD_BUNCH) ? s_sum[lane_id] : 0;
// sum = sum_32_shfl<vT>(sum);
//}
if (local_id < 64) s_calibrator[local_id] += s_calibrator[local_id+64];
__syncthreads();
if (local_id < 32) s_calibrator[local_id] += s_calibrator[local_id+32];
if (local_id < 16) s_calibrator[local_id] += s_calibrator[local_id+16];
if (local_id < 8) s_calibrator[local_id] += s_calibrator[local_id+8];
if (local_id < 4) s_calibrator[local_id] += s_calibrator[local_id+4];
if (local_id < 2) s_calibrator[local_id] += s_calibrator[local_id+2];
if (local_id < 1) s_calibrator[local_id] += s_calibrator[local_id+1];
if (!local_id)
{
atomicAdddouble(&d_y[s_tile_ptr[0]], s_calibrator[0]);
}
return;
}
int local_par_id = local_id;
magma_index_t row_start_current, row_start_target, row_start_previous;
sum = MAGMA_D_ZERO;
    // use (p - 1), because the tail tile is handled by the CSR-vector method
if (global_id < p - 1)
{
row_start_previous = local_id ? s_tile_ptr[local_id-1] : -1;
row_start_current = s_tile_ptr[local_id];
if (row_start_previous != row_start_current)
{
row_start_target = row_start_current;
while (row_start_target == row_start_current
&& local_par_id < blockDim.x)
{
sum += s_calibrator[local_par_id];
local_par_id++;
row_start_current = s_tile_ptr[local_par_id];
}
if (row_start_target == s_tile_ptr[0]
|| row_start_target == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP-1])
{
atomicAdddouble(&d_y[row_start_target], sum);
}
else
d_y[row_start_target] += sum;
}
}
}
__global__ void
spmv_csr5_tail_tile_kernel(
const magma_index_t *d_row_pointer,
const magma_index_t *d_column_index,
double *d_value,
double *d_x,
double *d_y,
const magma_index_t tail_tile_start,
const magma_index_t p,
const int sigma,
const double alpha)
{
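    // the irregular tail tile is handled CSR-vector style: one block per row, with
    // MAGMA_CSR5_OMEGA threads striding over that row and reducing in shared memory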
const int local_id = threadIdx.x;
const magma_index_t row_id = tail_tile_start + blockIdx.x;
const magma_index_t row_start = !blockIdx.x ? (p - 1)
* MAGMA_CSR5_OMEGA * sigma
: d_row_pointer[row_id];
const magma_index_t row_stop = d_row_pointer[row_id + 1];
double sum = MAGMA_D_ZERO;
for (magma_index_t idx = local_id + row_start;
idx < row_stop; idx += MAGMA_CSR5_OMEGA)
{
sum += candidate(d_value, d_x, d_column_index, idx, alpha);
}
//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
// sum = sum_32_shfl<vT>(sum);
//#else
__shared__ double s_sum[MAGMA_CSR5_OMEGA];
s_sum[local_id] = sum;
sum_32(s_sum, local_id);
//#endif
if (!local_id)
d_y[row_id] += s_sum[0]; //= !blockIdx.x ? d_y[row_id] + sum : sum;
}
__global__ void
dgecsr5mv_kernel_update_y(int num_rows,
double beta,
double * dy)
{
const magma_index_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < num_rows)
{
if (beta == MAGMA_D_ZERO)
dy[row] = MAGMA_D_ZERO;
else
dy[row] *= beta;
}
}
#endif
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR5 (val (tile-wise column-major),
row_pointer,
col (tile-wise column-major),
tile_pointer,
tile_desc).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
p magma_int_t
number of tiles in A
@param[in]
alpha double
scalar multiplier
@param[in]
sigma magma_int_t
sigma in A in CSR5
@param[in]
bit_y_offset magma_int_t
bit_y_offset in A in CSR5
@param[in]
bit_scansum_offset magma_int_t
bit_scansum_offset in A in CSR5
@param[in]
num_packet magma_int_t
num_packet in A in CSR5
@param[in]
dtile_ptr magmaUIndex_ptr
tilepointer of A in CSR5
@param[in]
dtile_desc magmaUIndex_ptr
tiledescriptor of A in CSR5
@param[in]
dtile_desc_offset_ptr magmaIndex_ptr
tiledescriptor_offsetpointer of A in CSR5
@param[in]
dtile_desc_offset magmaIndex_ptr
                tiledescriptor_offset of A in CSR5
@param[in]
dcalibrator magmaDouble_ptr
calibrator of A in CSR5
@param[in]
tail_tile_start magma_int_t
start of the last tile in A
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsr5mv(
magma_trans_t transA,
magma_int_t m,
magma_int_t n,
magma_int_t p,
double alpha,
magma_int_t sigma,
magma_int_t bit_y_offset,
magma_int_t bit_scansum_offset,
magma_int_t num_packet,
magmaUIndex_ptr dtile_ptr,
magmaUIndex_ptr dtile_desc,
magmaIndex_ptr dtile_desc_offset_ptr,
magmaIndex_ptr dtile_desc_offset,
magmaDouble_ptr dcalibrator,
magma_int_t tail_tile_start,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
int info = MAGMA_ERR_NOT_SUPPORTED;
#if (defined( CUDA_VERSION ) && ( CUDA_VERSION >= 8000 ))
magma_int_t arch = magma_getdevice_arch();
if ( arch >= 600 ) {
//dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
//magma_int_t threads = BLOCK_SIZE;
//dgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
// (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
// phase 1. update y: y = beta * y
magma_int_t num_threads = MAGMA_CSR5_THREAD_GROUP;
magma_int_t num_blocks = magma_ceildiv( m, num_threads );
//ceil ((double)m / (double)num_threads);
dgecsr5mv_kernel_update_y
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>(m, beta, dy);
// phase 2. spmv: y += alpha * A * x
num_threads = MAGMA_CSR5_THREAD_GROUP;
num_blocks = magma_ceildiv( p-1, num_threads / MAGMA_CSR5_OMEGA );
// ceil ((double)(p-1) / (double)(num_threads / MAGMA_CSR5_OMEGA));
switch (sigma)
{
case 4:
spmv_csr5_compute_kernel<4>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 5:
spmv_csr5_compute_kernel<5>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 6:
spmv_csr5_compute_kernel<6>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 7:
spmv_csr5_compute_kernel<7>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 8:
spmv_csr5_compute_kernel<8>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 9:
spmv_csr5_compute_kernel<9>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 10:
spmv_csr5_compute_kernel<10>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 11:
spmv_csr5_compute_kernel<11>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 12:
spmv_csr5_compute_kernel<12>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 13:
spmv_csr5_compute_kernel<13>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 14:
spmv_csr5_compute_kernel<14>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 15:
spmv_csr5_compute_kernel<15>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 16:
spmv_csr5_compute_kernel<16>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 17:
spmv_csr5_compute_kernel<17>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 18:
spmv_csr5_compute_kernel<18>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 19:
spmv_csr5_compute_kernel<19>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 20:
spmv_csr5_compute_kernel<20>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 21:
spmv_csr5_compute_kernel<21>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 22:
spmv_csr5_compute_kernel<22>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 23:
spmv_csr5_compute_kernel<23>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 24:
spmv_csr5_compute_kernel<24>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 25:
spmv_csr5_compute_kernel<25>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 26:
spmv_csr5_compute_kernel<26>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 27:
spmv_csr5_compute_kernel<27>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 28:
spmv_csr5_compute_kernel<28>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 29:
spmv_csr5_compute_kernel<29>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 30:
spmv_csr5_compute_kernel<30>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 31:
spmv_csr5_compute_kernel<31>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
case 32:
spmv_csr5_compute_kernel<32>
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,
dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha);
break;
}
num_threads = MAGMA_CSR5_THREAD_GROUP;
num_blocks = ceil((double)(p-1)/(double)num_threads);
spmv_csr5_calibrate_kernel
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(dtile_ptr, dcalibrator, dy, p);
num_threads = MAGMA_CSR5_OMEGA;
num_blocks = m - tail_tile_start;
spmv_csr5_tail_tile_kernel
<<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
(drowptr, dcolind, dval, dx, dy,
tail_tile_start, p, sigma, alpha);
info = MAGMA_SUCCESS;
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return info;
}
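// ----------------------------------------------------------------------------
// Hedged usage sketch: how a caller might invoke magma_dgecsr5mv once the CSR5
// arrays and tile descriptors have been assembled. The variable names below are
// hypothetical placeholders; only the signature documented above is assumed.
//
//   magma_int_t info = magma_dgecsr5mv(
//       MagmaNoTrans, m, n, p,
//       alpha, sigma, bit_y_offset, bit_scansum_offset, num_packet,
//       dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset,
//       dcalibrator, tail_tile_start,
//       dval, drowptr, dcolind, dx, beta, dy, queue );
//   if (info != MAGMA_SUCCESS) {
//       // CSR5 SpMV needs CUDA >= 8.0 and a device with arch >= 600
//   }
// ----------------------------------------------------------------------------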
|
a092c95b99a0ed0db4a3767bd778b531a517b3d8.hip | // !!! This is a file automatically generated by hipify!!!
/// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_sage.cu
*
* @brief Simple test driver program for single source shortest path.
*/
#include <gunrock/app/sage/sage_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return hipError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
hipError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT,
// graph::HAS_EDGE_VALUES | graph::HAS_CSR>
graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
hipError_t retval = hipSuccess;
util::CpuTimer cpu_timer;
GraphT graph; // graph we process on
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
// force edge values to be 1, don't enable this unless you really want to
// for (SizeT e=0; e < graph.edges; e++)
// graph.CsrT::edge_values[e] = 1;
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// GUARD_CU(graph.CsrT::edge_values.Print("", 100));
// util::PrintMsg("sizeof(VertexT) = " + std::to_string(sizeof(VertexT))
// + ", sizeof(SizeT) = " + std::to_string(sizeof(SizeT))
// + ", sizeof(ValueT) = " + std::to_string(sizeof(ValueT)));
std::vector<std::string> switches{
"feature-column", "num-children-per-source", "num-leafs-per-child"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[](util::Parameters ¶meters, GraphT &graph) {
hipError_t retval = hipSuccess;
bool quick = parameters.Get<bool>("quick");
if (!quick) {
bool quiet = parameters.Get<bool>("quiet");
std::string wf1_file = parameters.Get<std::string>("Wf1");
std::string wa1_file = parameters.Get<std::string>("Wa1");
std::string wf2_file = parameters.Get<std::string>("Wf2");
          std::string wa2_file = parameters.Get<std::string>("Wa2");
std::string feature_file = parameters.Get<std::string>("features");
int Wf1_dim_0 =
parameters.Get<int>("feature-column"); //("Wf1-dim0");
int Wa1_dim_0 =
parameters.Get<int>("feature-column"); //("Wa1-dim0");
int Wf1_dim_1 = parameters.Get<int>("Wf1-dim1");
int Wa1_dim_1 = parameters.Get<int>("Wa1-dim1");
int Wf2_dim_0 =
Wf1_dim_1 + Wa1_dim_1; // parameters.Get<int> ("Wf2-dim0");
int Wa2_dim_0 =
Wf1_dim_1 + Wa1_dim_1; // parameters.Get<int> ("Wa2-dim0");
int Wf2_dim_1 = parameters.Get<int>("Wf2-dim1");
int Wa2_dim_1 = parameters.Get<int>("Wa2-dim1");
int num_neigh1 = parameters.Get<int>("num-children-per-source");
int num_neigh2 = parameters.Get<int>("num-leafs-per-child");
if (!util::isValid(num_neigh2)) num_neigh2 = num_neigh1;
int batch_size = parameters.Get<int>("batch-size");
ValueT **W_f_1 = app::sage::template ReadMatrix<ValueT, SizeT>(
wf1_file, Wf1_dim_0, Wf1_dim_1);
ValueT **W_a_1 = app::sage::template ReadMatrix<ValueT, SizeT>(
wa1_file, Wa1_dim_0, Wa1_dim_1);
ValueT **W_f_2 = app::sage::template ReadMatrix<ValueT, SizeT>(
wf2_file, Wf2_dim_0, Wf2_dim_1);
ValueT **W_a_2 = app::sage::template ReadMatrix<ValueT, SizeT>(
wa2_file, Wa2_dim_0, Wa2_dim_1);
ValueT **features = app::sage::template ReadMatrix<ValueT, SizeT>(
feature_file, graph.nodes, Wf1_dim_0);
ValueT *source_embedding =
new ValueT[(uint64_t)graph.nodes * (Wa2_dim_1 + Wf2_dim_1)];
util::PrintMsg("Computing reference value ...", !quiet);
util::PrintMsg("__________________________", !quiet);
float elapsed = app::sage::CPU_Reference(
parameters, graph, features, W_f_1, W_a_1, W_f_2, W_a_2,
source_embedding, quiet);
util::PrintMsg(
"--------------------------\n"
"CPU Reference elapsed: " +
std::to_string(elapsed) + " ms.",
!quiet);
app::sage::Validate_Results(parameters, graph, source_embedding,
Wa2_dim_1 + Wf2_dim_1, true);
delete[] source_embedding;
source_embedding = NULL;
for (auto v = 0; v < graph.nodes; v++) {
delete[] features[v];
features[v] = NULL;
}
delete[] features;
features = NULL;
}
std::vector<std::string> switches2{"batch-size"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches2,
[](util::Parameters ¶meters, GraphT &graph) {
return app::sage::RunTests(parameters, graph);
}));
return retval;
}));
return retval;
}
};
int main(int argc, char **argv) {
hipError_t retval = hipSuccess;
util::Parameters parameters("test sage");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::sage::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return hipSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F32B | app::DIRECTED | app::UNDIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| a092c95b99a0ed0db4a3767bd778b531a517b3d8.cu | /// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_sage.cu
*
* @brief Simple test driver program for single source shortest path.
*/
#include <gunrock/app/sage/sage_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return cudaError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
cudaError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT,
// graph::HAS_EDGE_VALUES | graph::HAS_CSR>
graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
cudaError_t retval = cudaSuccess;
util::CpuTimer cpu_timer;
GraphT graph; // graph we process on
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
// force edge values to be 1, don't enable this unless you really want to
// for (SizeT e=0; e < graph.edges; e++)
// graph.CsrT::edge_values[e] = 1;
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// GUARD_CU(graph.CsrT::edge_values.Print("", 100));
// util::PrintMsg("sizeof(VertexT) = " + std::to_string(sizeof(VertexT))
// + ", sizeof(SizeT) = " + std::to_string(sizeof(SizeT))
// + ", sizeof(ValueT) = " + std::to_string(sizeof(ValueT)));
std::vector<std::string> switches{
"feature-column", "num-children-per-source", "num-leafs-per-child"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[](util::Parameters ¶meters, GraphT &graph) {
cudaError_t retval = cudaSuccess;
bool quick = parameters.Get<bool>("quick");
if (!quick) {
bool quiet = parameters.Get<bool>("quiet");
std::string wf1_file = parameters.Get<std::string>("Wf1");
std::string wa1_file = parameters.Get<std::string>("Wa1");
std::string wf2_file = parameters.Get<std::string>("Wf2");
std::string wa2_file = parameters.Get<std::string>("Wf2");
std::string feature_file = parameters.Get<std::string>("features");
int Wf1_dim_0 =
parameters.Get<int>("feature-column"); //("Wf1-dim0");
int Wa1_dim_0 =
parameters.Get<int>("feature-column"); //("Wa1-dim0");
int Wf1_dim_1 = parameters.Get<int>("Wf1-dim1");
int Wa1_dim_1 = parameters.Get<int>("Wa1-dim1");
int Wf2_dim_0 =
Wf1_dim_1 + Wa1_dim_1; // parameters.Get<int> ("Wf2-dim0");
int Wa2_dim_0 =
Wf1_dim_1 + Wa1_dim_1; // parameters.Get<int> ("Wa2-dim0");
int Wf2_dim_1 = parameters.Get<int>("Wf2-dim1");
int Wa2_dim_1 = parameters.Get<int>("Wa2-dim1");
int num_neigh1 = parameters.Get<int>("num-children-per-source");
int num_neigh2 = parameters.Get<int>("num-leafs-per-child");
if (!util::isValid(num_neigh2)) num_neigh2 = num_neigh1;
int batch_size = parameters.Get<int>("batch-size");
ValueT **W_f_1 = app::sage::template ReadMatrix<ValueT, SizeT>(
wf1_file, Wf1_dim_0, Wf1_dim_1);
ValueT **W_a_1 = app::sage::template ReadMatrix<ValueT, SizeT>(
wa1_file, Wa1_dim_0, Wa1_dim_1);
ValueT **W_f_2 = app::sage::template ReadMatrix<ValueT, SizeT>(
wf2_file, Wf2_dim_0, Wf2_dim_1);
ValueT **W_a_2 = app::sage::template ReadMatrix<ValueT, SizeT>(
wa2_file, Wa2_dim_0, Wa2_dim_1);
ValueT **features = app::sage::template ReadMatrix<ValueT, SizeT>(
feature_file, graph.nodes, Wf1_dim_0);
ValueT *source_embedding =
new ValueT[(uint64_t)graph.nodes * (Wa2_dim_1 + Wf2_dim_1)];
util::PrintMsg("Computing reference value ...", !quiet);
util::PrintMsg("__________________________", !quiet);
float elapsed = app::sage::CPU_Reference(
parameters, graph, features, W_f_1, W_a_1, W_f_2, W_a_2,
source_embedding, quiet);
util::PrintMsg(
"--------------------------\n"
"CPU Reference elapsed: " +
std::to_string(elapsed) + " ms.",
!quiet);
app::sage::Validate_Results(parameters, graph, source_embedding,
Wa2_dim_1 + Wf2_dim_1, true);
delete[] source_embedding;
source_embedding = NULL;
for (auto v = 0; v < graph.nodes; v++) {
delete[] features[v];
features[v] = NULL;
}
delete[] features;
features = NULL;
}
std::vector<std::string> switches2{"batch-size"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches2,
[](util::Parameters ¶meters, GraphT &graph) {
return app::sage::RunTests(parameters, graph);
}));
return retval;
}));
return retval;
}
};
int main(int argc, char **argv) {
cudaError_t retval = cudaSuccess;
util::Parameters parameters("test sage");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::sage::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return cudaSuccess;
}
GUARD_CU(parameters.Check_Required());
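  // Switch_Types compiles and dispatches main_struct only for the type combinations
  // selected by these flags (32-bit vertex/size ids, float values, directed or
  // undirected graphs)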
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F32B | app::DIRECTED | app::UNDIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
864e94f5ac5be10b3502034b2fa4517b2e9911c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "gpu.h"
// Cuda cores per multiprocessor from Compute Capability
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ 0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class
{ 0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class
{ 0x60, 64 }, // Pascal Generation (SM 6.0) GP100 class
{ 0x61, 128}, // Pascal Generation (SM 6.1) GP10x class
{ 0x62, 128}, // Pascal Generation (SM 6.2) GP10x class
{ 0x70, 64 }, // Volta Generation (SM 7.0) GV100 class
{ 0x72, 64 }, // Xavier Generation (SM 7.2) GV10B class
{ 0x75, 64 }, // Turing Generation (SM 7.5) TU102 class
{ 0x80, 64 }, // Ampere Generation (SM 8.0) GA10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)){
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
    // If we don't find the values, we default to the previous entry so the code keeps running
//fprintf(stderr, "MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
// How many local GPUs
int gpu_count(){
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess){
//fprintf(stderr, "Error: hipGetDeviceCount returns %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
deviceCount = -1;
}
return deviceCount;
}
// Load GPU properties
int gpu_properties(hipDeviceProp_t* deviceProp, int deviceCount){
hipError_t error_id;
for (int c = 0; c < deviceCount; c++){
error_id = hipGetDeviceProperties(&deviceProp[c], c);
if (error_id != hipSuccess) return -1;
}
return deviceCount;
}
// Descriptive GPU details
int gpu_description(char* buffer){
int deviceCount = gpu_count();
if ( deviceCount < 0 ) return -1;
hipDeviceProp_t *deviceProp = (hipDeviceProp_t *)malloc(sizeof(hipDeviceProp_t) * deviceCount);
gpu_properties(deviceProp, deviceCount);
int driverVersion = 0;
int runtimeVersion = 0;
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
// GPU chipset family
char family[32];
// Description entry
char buf_entry[4096];
for (int c = 0; c < deviceCount && c < MAX_GPU; c++){
// Get GPU family
switch ( deviceProp[c].major ){
case 3: sprintf(family, "Kepler"); break;
case 5: sprintf(family, "Maxwell"); break;
case 6: sprintf(family, "Pascal"); break;
case 7:
if ( deviceProp[c].minor == 0 )
sprintf(family, "Volta");
if ( deviceProp[c].minor == 2 )
sprintf(family, "Xavier");
if ( deviceProp[c].minor == 5 )
sprintf(family, "Turing");
break;
case 8: sprintf(family, "Ampere"); break;
}
snprintf(buf_entry, 4096, "\
Device: %d\n\
Name: %s\n\
Family: %s\n\
Capability: %d.%d\n\
Multiprocessors: %d\n\
Cores / MP: %d\n\
Global Memory: %.0f MB\n\
Driver: %d.%d\n\
Runtime: %d.%d\n",
c,
deviceProp[c].name,
family,
deviceProp[c].major, deviceProp[c].minor,
deviceProp[c].multiProcessorCount,
_ConvertSMVer2Cores(deviceProp[c].major, deviceProp[c].minor),
(float)deviceProp[c].totalGlobalMem/1048576.0f,
driverVersion/1000, (driverVersion%100)/10,
runtimeVersion/1000, (runtimeVersion%100)/10);
// Add to description
strncat(buffer, buf_entry, 4096);
}
free(deviceProp);
return deviceCount;
}
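// Hedged usage sketch -- buffer sizing is illustrative; gpu_description() appends
// up to 4096 bytes per visible device into the caller-provided buffer:
//   char desc[MAX_GPU * 4096] = {0};
//   int n = gpu_description(desc);
//   if (n > 0) printf("%s", desc);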
| 864e94f5ac5be10b3502034b2fa4517b2e9911c0.cu | #include <stdio.h>
#include "gpu.h"
// Cuda cores per multiprocessor from Compute Capability
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ 0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class
{ 0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class
{ 0x60, 64 }, // Pascal Generation (SM 6.0) GP100 class
{ 0x61, 128}, // Pascal Generation (SM 6.1) GP10x class
{ 0x62, 128}, // Pascal Generation (SM 6.2) GP10x class
{ 0x70, 64 }, // Volta Generation (SM 7.0) GV100 class
{ 0x72, 64 }, // Xavier Generation (SM 7.2) GV10B class
{ 0x75, 64 }, // Turing Generation (SM 7.5) TU102 class
{ 0x80, 64 }, // Ampere Generation (SM 8.0) GA10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)){
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
    // If we don't find the values, we default to the previous entry so the code keeps running
//fprintf(stderr, "MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
// How many local GPUs
int gpu_count(){
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess){
//fprintf(stderr, "Error: cudaGetDeviceCount returns %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
deviceCount = -1;
}
return deviceCount;
}
// Load GPU properties
int gpu_properties(cudaDeviceProp* deviceProp, int deviceCount){
cudaError_t error_id;
for (int c = 0; c < deviceCount; c++){
error_id = cudaGetDeviceProperties(&deviceProp[c], c);
if (error_id != cudaSuccess) return -1;
}
return deviceCount;
}
// Descriptive GPU details
int gpu_description(char* buffer){
int deviceCount = gpu_count();
if ( deviceCount < 0 ) return -1;
cudaDeviceProp *deviceProp = (cudaDeviceProp *)malloc(sizeof(cudaDeviceProp) * deviceCount);
gpu_properties(deviceProp, deviceCount);
int driverVersion = 0;
int runtimeVersion = 0;
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
// GPU chipset family
char family[32];
// Description entry
char buf_entry[4096];
for (int c = 0; c < deviceCount && c < MAX_GPU; c++){
// Get GPU family
switch ( deviceProp[c].major ){
case 3: sprintf(family, "Kepler"); break;
case 5: sprintf(family, "Maxwell"); break;
case 6: sprintf(family, "Pascal"); break;
case 7:
if ( deviceProp[c].minor == 0 )
sprintf(family, "Volta");
if ( deviceProp[c].minor == 2 )
sprintf(family, "Xavier");
if ( deviceProp[c].minor == 5 )
sprintf(family, "Turing");
break;
case 8: sprintf(family, "Ampere"); break;
}
snprintf(buf_entry, 4096, "\
Device: %d\n\
Name: %s\n\
Family: %s\n\
Capability: %d.%d\n\
Multiprocessors: %d\n\
Cores / MP: %d\n\
Global Memory: %.0f MB\n\
Driver: %d.%d\n\
Runtime: %d.%d\n",
c,
deviceProp[c].name,
family,
deviceProp[c].major, deviceProp[c].minor,
deviceProp[c].multiProcessorCount,
_ConvertSMVer2Cores(deviceProp[c].major, deviceProp[c].minor),
(float)deviceProp[c].totalGlobalMem/1048576.0f,
driverVersion/1000, (driverVersion%100)/10,
runtimeVersion/1000, (runtimeVersion%100)/10);
// Add to description
strncat(buffer, buf_entry, 4096);
}
free(deviceProp);
return deviceCount;
}
|
df68a2237769119a38098b1de90c42c5ff70e601.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include "hip/hip_runtime.h"
#include "utils.cuh"
#include "../device/device_context.cuh"
#include "tsvd.h"
#include <ctime>
#include <thrust/iterator/counting_iterator.h>
#include<algorithm>
#include <thrust/sequence.h>
#include <thrust/inner_product.h>
#include <thrust/transform_reduce.h>
namespace tsvd
{
/**
* Division utility to get explained variance ratio
*
* @param XVar
* @param XVarSum
* @param ExplainedVarRatio
* @param context
*/
void divide(const Matrix<float> &XVar, const Matrix<float> &XVarSum, Matrix<float> &ExplainedVarRatio, DeviceContext &context){
auto d_x_var = XVar.data();
auto d_x_var_sum = XVarSum.data();
auto d_expl_var_ratio = ExplainedVarRatio.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+ExplainedVarRatio.size(), [=]__device__(int idx){
float div_val = 0.0;
//XVarSum can possibly be zero
if(d_x_var_sum[0] != 0.0){
div_val = d_x_var[idx] / d_x_var_sum[0];
}
d_expl_var_ratio[idx] = div_val;
} );
}
/**
* Square each value in a matrix
*
* @param UmultSigma
* @param UmultSigmaSquare
* @param context
*/
void square_val(const Matrix<float> &UmultSigma, Matrix<float> &UmultSigmaSquare, DeviceContext &context){
auto n = UmultSigma.columns();
auto m = UmultSigma.rows();
auto k = UmultSigmaSquare.rows();
auto d_u_mult_sigma = UmultSigma.data();
auto d_u_mult_sigma_square = UmultSigmaSquare.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+UmultSigmaSquare.size(), [=]__device__(int idx){
float square_val = ::pow(d_u_mult_sigma[idx],2);
d_u_mult_sigma_square[idx] = square_val;
} );
}
/**
* Alternative variance calculation (Can be slow for big matrices)
*
* @param UmultSigma
* @param k
* @param UmultSigmaVar
* @param context
*/
void calc_var(const Matrix<float>UmultSigma, int k, Matrix<float> &UmultSigmaVar, DeviceContext &context){
    //Set aside matrix of 1's for getting columnar sums (t(UmultSigma) * UmultOnes)
Matrix<float>UmultOnes(UmultSigma.rows(), 1);
UmultOnes.fill(1.0f);
//Allocate matrices for variance calculation
Matrix<float>UmultSigmaSquare(UmultSigma.rows(), UmultSigma.columns());
Matrix<float>UmultSigmaSum(k, 1);
Matrix<float>UmultSigmaSumSquare(k, 1);
Matrix<float>UmultSigmaSumOfSquare(k, 1);
Matrix<float>UmultSigmaVarNum(k, 1);
//Calculate Variance
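    // Uses the column-wise identity Var(x) = (m*sum(x^2) - (sum(x))^2) / m^2
    // (population variance), assembled from matrix products with the ones vector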
square_val(UmultSigma, UmultSigmaSquare, context);
multiply(UmultSigmaSquare, UmultOnes, UmultSigmaSumOfSquare, context, true, false, 1.0f);
multiply(UmultSigma, UmultOnes, UmultSigmaSum, context, true, false, 1.0f);
square_val(UmultSigmaSum, UmultSigmaSumSquare, context);
//Get rows
auto m = UmultSigma.rows();
multiply(UmultSigmaSumOfSquare, m, context);
subtract(UmultSigmaSumOfSquare, UmultSigmaSumSquare, UmultSigmaVarNum, context);
auto d_u_sigma_var_num = UmultSigmaVarNum.data();
auto d_u_sigma_var = UmultSigmaVar.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+UmultSigmaVar.size(), [=]__device__(int idx){
float div_val = d_u_sigma_var_num[idx]/(::pow(m,2));
d_u_sigma_var[idx] = div_val;
} );
}
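/**
 * Iterator that lazily yields the squared deviation from the column mean,
 * (x_ij - mean_j)^2, for a column-major matrix; consumed by the segmented
 * reduction in calc_var_numerator below
 */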
template<typename T>
class variance_iterator{
public:
// Required iterator traits
typedef variance_iterator<T> self_type; ///< My own type
typedef size_t difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
const T* data_ptr;
const T* mean_ptr;
const int col_rows;
size_t offset;
__device__ T operator[](size_t idx){
idx = idx + offset;
T mean = mean_ptr[idx/col_rows];
T dev_square = pow((data_ptr[idx] - mean),2);
return dev_square;
}
__device__ self_type operator+(size_t idx){
self_type retval(data_ptr, mean_ptr, col_rows);
retval.offset += idx;
return retval;
}
__host__ __device__ variance_iterator(const T* data_ptr, const T* mean_ptr, const int col_rows):data_ptr(data_ptr), mean_ptr(mean_ptr), col_rows(col_rows), offset(0){
}
};
/**
* Utility to calculate variance for each column of a matrix
*
* @param X
* @param UColMean
* @param UVar
* @param context
*/
void calc_var_numerator(const Matrix<float> &X, const Matrix<float> &UColMean, Matrix<float> &UVar, DeviceContext &context){
auto m = X.rows();
variance_iterator<float> variance(X.data(), UColMean.data(), m);
thrust::device_vector<int> segments(X.columns() + 1);
thrust::sequence(segments.begin(), segments.end(), 0, static_cast<int>(X.rows()));
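    // segments = {0, m, 2m, ...}: one segment per column, so the segmented sum
    // reduces each column's squared deviations independently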
// Determine temporary device storage requirements
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
int cols = static_cast<int>(X.columns());
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
// Allocate temporary storage
safe_cuda(hipMalloc(&d_temp_storage, temp_storage_bytes));
// Run sum-reduction
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
safe_cuda(hipFree(d_temp_storage));
}
/**
* Utility to reverse q to show most import k to least important k
*
* @param Q
* @param QReversed
* @param context
*/
void col_reverse_q(const Matrix<float> &Q, Matrix<float> &QReversed, DeviceContext &context){
auto n = Q.columns();
auto m = Q.rows();
auto k = QReversed.rows();
auto d_q = Q.data();
auto d_q_reversed = QReversed.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+QReversed.size(), [=]__device__(int idx){
int dest_row = idx % m;
int dest_col = idx/m;
int src_row = dest_row;
int src_col = (n - dest_col) - 1;
d_q_reversed[idx] = d_q[src_col * m + src_row];
} );
}
/**
* Truncate Q transpose to top k
*
* @param Qt
* @param QtTrunc
* @param context
*/
void row_reverse_trunc_q(const Matrix<float> &Qt, Matrix<float> &QtTrunc, DeviceContext &context){
auto m = Qt.rows();
auto k = QtTrunc.rows();
auto d_q = Qt.data();
auto d_q_trunc = QtTrunc.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+QtTrunc.size(), [=]__device__(int idx){
int dest_row = idx % k;
int dest_col = idx / k;
int src_row = (m - dest_row) - 1;
int src_col = dest_col;
float q = d_q[src_col * m + src_row];
d_q_trunc[idx] = q;
} );
}
/**
* Calculate the U matrix, which is defined as:
* U = A*V/sigma, where A is our X matrix, V is Q, and sigma_i is the singular value w_i (each column i of A*V is scaled by 1/w_i)
*
* @param X
* @param Q
* @param w
* @param U
* @param context
*/
void calculate_u(const Matrix<float> &X, const Matrix<float> &Q, const Matrix<float> &w, Matrix<float> &U, DeviceContext &context){
multiply(X, Q, U, context, false, false, 1.0f); //A*V
auto d_u = U.data();
auto d_sigma = w.data();
auto column_size = U.rows();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+U.size(), [=]__device__(int idx){
int column = idx/column_size;
float sigma = d_sigma[column];
float u = d_u[idx];
if(sigma != 0.0){
d_u[idx] = u * 1.0/sigma;
} else{
d_u[idx] = 0.0;
}
} );
}
/**
* Obtain SVD attributes, which are as follows:
* 1.Singular Values
* 2.U matrix
* 3.Explained Variance
* 4.Explained Variance Ratio
*/
void get_tsvd_attr(Matrix<float> &X, Matrix<float> &Q, double* _Q, Matrix<float> &w, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param, DeviceContext &context){
//Obtain Q^T to obtain vector as row major order
Matrix<float>Qt(Q.columns(), Q.rows());
transpose(Q, Qt, context); //Needed for calculate_u()
Matrix<float>QtTrunc(_param.k, Qt.columns());
row_reverse_trunc_q(Qt, QtTrunc, context);
QtTrunc.copy_to_host(_Q); //Send to host
//Obtain square root of eigenvalues, which are singular values
w.transform([=]__device__(float elem){
if(elem > 0.0){
return std::sqrt(elem);
}else{
return 0.0f;
}
}
);
//Sort from biggest singular value to smallest
std::vector<double> w_temp(w.size());
w.copy_to_host(w_temp.data()); //Send to host
std::reverse(w_temp.begin(), w_temp.end());
std::copy(w_temp.begin(), w_temp.begin() + _param.k, _w);
Matrix<float>sigma(_param.k, 1);
sigma.copy(w_temp.data());
//Get U matrix
Matrix<float>U(X.rows(), _param.k);
Matrix<float>QReversed(Q.rows(), Q.columns());
col_reverse_q(Q, QReversed, context);
calculate_u(X, QReversed, sigma, U, context);
U.copy_to_host(_U); //Send to host
//Explained Variance
Matrix<float>UmultSigma(U.rows(), U.columns());
//U * Sigma
multiply_diag(U, sigma, UmultSigma, context, false);
Matrix<float>UOnesSigma(UmultSigma.rows(), 1);
UOnesSigma.fill(1.0f);
Matrix<float>USigmaVar(_param.k, 1);
Matrix<float>USigmaColMean(_param.k, 1);
multiply(UmultSigma, UOnesSigma, USigmaColMean, context, true, false, 1.0f);
float m_usigma = UmultSigma.rows();
multiply(USigmaColMean, 1/m_usigma, context);
calc_var_numerator(UmultSigma, USigmaColMean, USigmaVar, context);
multiply(USigmaVar, 1/m_usigma, context);
USigmaVar.copy_to_host(_explained_variance);
//Explained Variance Ratio
//Set aside matrix of 1's for getting sum of columnar variances
Matrix<float>XmultOnes(X.rows(), 1);
XmultOnes.fill(1.0f);
Matrix<float>XVar(X.columns(), 1);
Matrix<float>XColMean(X.columns(), 1);
multiply(X, XmultOnes, XColMean, context, true, false, 1.0f);
float m = X.rows();
multiply(XColMean, 1/m, context);
calc_var_numerator(X, XColMean, XVar, context);
multiply(XVar, 1/m, context);
Matrix<float>XVarSum(1,1);
multiply(XVar, XmultOnes, XVarSum, context, true, false, 1.0f);
Matrix<float>ExplainedVarRatio(_param.k, 1);
divide(USigmaVar, XVarSum, ExplainedVarRatio, context);
ExplainedVarRatio.copy_to_host(_explained_variance_ratio);
}
/**
* Conduct truncated svd using hipsolverDnSsyevd
*
* @param X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
void cusolver_tsvd(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param){
//Allocate matrix for X^TX
Matrix<float>XtX(_param.X_n, _param.X_n);
//Create context
DeviceContext context;
//Multiply X and Xt and output result to XtX
multiply(X, X, XtX, context, true, false, 1.0f);
//Set up Q (V^T) and w (singular value) matrices (w is a matrix of size Q.rows() by 1; really just a vector)
Matrix<float>Q(XtX.rows(), XtX.columns()); // n X n -> V^T
Matrix<float>w(Q.rows(), 1);
calculate_eigen_pairs_exact(XtX, Q, w, context);
//Get tsvd attributes
get_tsvd_attr(X, Q, _Q, w, _w, _U, _explained_variance, _explained_variance_ratio, _param, context);
}
/**
* Conduct truncated svd using the power method
*
* @param X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
void power_tsvd(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param){
//Allocate matrix for X^TX
Matrix<float>M(_param.X_n, _param.X_n);
//Allocate matrix to be used in hipblasSger outer product calculation
Matrix<float>A(M.rows(), M.columns());
//Create context
DeviceContext context;
//Multiply X and Xt and output result to XtX
multiply(X, X, M, context, true, false, 1.0f);
//Set up Q (V^T) and w (singular value) matrices (w is a matrix of size _param.k by 1; really just a vector)
Matrix<float>Q(M.rows(), _param.k);
Matrix<float>w(_param.k, 1);
std::vector<double> w_temp(w.size());
/* Power Method for finding all eigenvectors/eigen values
* Logic:
*
* Since the matrix is symmetric, there exists an orthogonal basis of eigenvectors. Once you have found an eigenvector, extend it to an orthogonal basis,
* rewrite the matrix in terms of this basis and restrict to the orthogonal space of the known eigenvector.
* This is comprised in the method of deflation:
* You first find the first eigenvector v1 (with a maximal λ1), by iterating xn+1=Axn/|Axn|with a "random" initial x0. Once you have found a good approximation
* for v1, you consider B=A−λ1|v1|2 * v1vT (this simple step replaces the "rewrite in terms of this basis" above). This is a matrix that behaves like A for anything
* orthogonal to v1 and zeroes out v1. Use the power method on B again, which will reveal v2, an eigenvector of a largest eigenvalue of B.
* Then switch to C=B−λ2|v2|2 * v2vT so on.
*/
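//In symbols, each sweep below computes b_k1 = M*b_k, estimates the eigenvalue as
//lambda ~= dot(M*b_k, b_k) (b_k is kept normalised), and stops once the estimate changes
//by less than tol in relative terms; deflation then removes the found pair via
//M <- M - lambda * b_k * b_k^T (outer_product + subtract) before the next iteration of i.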
Matrix<float>b_k(_param.X_n, 1);
Matrix<float>b_k1(_param.X_n, 1);
for(int i = 0; i < _param.k; i ++){
//Set aside vector of randoms (n x 1)
b_k.random(i);
float previous_eigenvalue_estimate = FLT_MAX;
float eigen_value_estimate = FLT_MAX;
while(true){
multiply(M, b_k, b_k1, context);
hipblasSdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1.0, b_k.data(), 1.0, &eigen_value_estimate);
if(std::abs(eigen_value_estimate - previous_eigenvalue_estimate) <= (_param.tol * std::abs(previous_eigenvalue_estimate))) {
break;
}
normalize_vector_cublas(b_k1, context);
b_k.copy(b_k1);
previous_eigenvalue_estimate = eigen_value_estimate;
}
//Obtain eigen value
w_temp[i] = eigen_value_estimate;
//Put eigen vector into Q (starting at last column of Q)
thrust::copy(b_k.dptr(), b_k.dptr()+b_k.size(), Q.dptr()+Q.rows()*(Q.columns()-i-1));
//Get rid of eigen effect from original matrix (deflation)
multiply(A, 0.0, context);
outer_product(A, eigen_value_estimate, b_k, b_k, context);
subtract(M, A, M, context);
}
//Fill in w from vector w_temp
std::reverse(w_temp.begin(), w_temp.end());
w.copy(w_temp.data());
//Get tsvd attributes
get_tsvd_attr(X, Q, _Q, w, _w, _U, _explained_variance, _explained_variance_ratio, _param, context);
}
/**
* Conduct truncated SVD on a matrix
*
* @param _X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
void truncated_svd(const double* _X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param)
{
Matrix<float>X(_param.X_m, _param.X_n);
X.copy(_X);
truncated_svd_matrix(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param);
}
void truncated_svd_matrix(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param)
{
std::string algorithm(_param.algorithm);
try
{
if(algorithm == "cusolver"){
cusolver_tsvd(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param);
} else {
power_tsvd(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param);
}
}
catch (const std::exception &e)
{
std::cerr << "tsvd error: " << e.what() << "\n";
}
catch (std::string e)
{
std::cerr << "tsvd error: " << e << "\n";
}
catch (...)
{
std::cerr << "tsvd error\n";
}
}
}
| df68a2237769119a38098b1de90c42c5ff70e601.cu | #include <cstdio>
#include "cuda_runtime.h"
#include "utils.cuh"
#include "../device/device_context.cuh"
#include "tsvd.h"
#include <ctime>
#include <thrust/iterator/counting_iterator.h>
#include<algorithm>
#include <thrust/sequence.h>
#include <thrust/inner_product.h>
#include <thrust/transform_reduce.h>
namespace tsvd
{
/**
* Division utility to get explained variance ratio
*
* @param XVar
* @param XVarSum
* @param ExplainedVarRatio
* @param context
*/
void divide(const Matrix<float> &XVar, const Matrix<float> &XVarSum, Matrix<float> &ExplainedVarRatio, DeviceContext &context){
auto d_x_var = XVar.data();
auto d_x_var_sum = XVarSum.data();
auto d_expl_var_ratio = ExplainedVarRatio.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+ExplainedVarRatio.size(), [=]__device__(int idx){
float div_val = 0.0;
//XVarSum can possibly be zero
if(d_x_var_sum[0] != 0.0){
div_val = d_x_var[idx] / d_x_var_sum[0];
}
d_expl_var_ratio[idx] = div_val;
} );
}
/**
* Square each value in a matrix
*
* @param UmultSigma
* @param UmultSigmaSquare
* @param context
*/
void square_val(const Matrix<float> &UmultSigma, Matrix<float> &UmultSigmaSquare, DeviceContext &context){
auto n = UmultSigma.columns();
auto m = UmultSigma.rows();
auto k = UmultSigmaSquare.rows();
auto d_u_mult_sigma = UmultSigma.data();
auto d_u_mult_sigma_square = UmultSigmaSquare.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+UmultSigmaSquare.size(), [=]__device__(int idx){
float square_val = std::pow(d_u_mult_sigma[idx],2);
d_u_mult_sigma_square[idx] = square_val;
} );
}
/**
* Alternative variance calculation (Can be slow for big matrices)
*
* @param UmultSigma
* @param k
* @param UmultSigmaVar
* @param context
*/
void calc_var(const Matrix<float>UmultSigma, int k, Matrix<float> &UmultSigmaVar, DeviceContext &context){
//Set aside matrix of 1's for getting columnar sums (t(UmultSigma) * UmultOnes)
Matrix<float>UmultOnes(UmultSigma.rows(), 1);
UmultOnes.fill(1.0f);
//Allocate matrices for variance calculation
Matrix<float>UmultSigmaSquare(UmultSigma.rows(), UmultSigma.columns());
Matrix<float>UmultSigmaSum(k, 1);
Matrix<float>UmultSigmaSumSquare(k, 1);
Matrix<float>UmultSigmaSumOfSquare(k, 1);
Matrix<float>UmultSigmaVarNum(k, 1);
//Calculate Variance
square_val(UmultSigma, UmultSigmaSquare, context);
multiply(UmultSigmaSquare, UmultOnes, UmultSigmaSumOfSquare, context, true, false, 1.0f);
multiply(UmultSigma, UmultOnes, UmultSigmaSum, context, true, false, 1.0f);
square_val(UmultSigmaSum, UmultSigmaSumSquare, context);
//Get rows
auto m = UmultSigma.rows();
multiply(UmultSigmaSumOfSquare, m, context);
subtract(UmultSigmaSumOfSquare, UmultSigmaSumSquare, UmultSigmaVarNum, context);
auto d_u_sigma_var_num = UmultSigmaVarNum.data();
auto d_u_sigma_var = UmultSigmaVar.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+UmultSigmaVar.size(), [=]__device__(int idx){
float div_val = d_u_sigma_var_num[idx]/(std::pow(m,2));
d_u_sigma_var[idx] = div_val;
} );
}
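/**
* Lazy random-access "iterator" that yields the squared deviation (x - mean)^2 of each
* matrix element on demand (the column index is recovered as idx / col_rows), so the
* deviations never have to be materialised; it is consumed directly by the segmented
* sum reduction in calc_var_numerator below.
*/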
template<typename T>
class variance_iterator{
public:
// Required iterator traits
typedef variance_iterator<T> self_type; ///< My own type
typedef size_t difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
const T* data_ptr;
const T* mean_ptr;
const int col_rows;
size_t offset;
__device__ T operator[](size_t idx){
idx = idx + offset;
T mean = mean_ptr[idx/col_rows];
T dev_square = pow((data_ptr[idx] - mean),2);
return dev_square;
}
__device__ self_type operator+(size_t idx){
self_type retval(data_ptr, mean_ptr, col_rows);
retval.offset += idx;
return retval;
}
__host__ __device__ variance_iterator(const T* data_ptr, const T* mean_ptr, const int col_rows):data_ptr(data_ptr), mean_ptr(mean_ptr), col_rows(col_rows), offset(0){
}
};
/**
* Utility to calculate variance for each column of a matrix
*
* @param X
* @param UColMean
* @param UVar
* @param context
*/
void calc_var_numerator(const Matrix<float> &X, const Matrix<float> &UColMean, Matrix<float> &UVar, DeviceContext &context){
auto m = X.rows();
variance_iterator<float> variance(X.data(), UColMean.data(), m);
thrust::device_vector<int> segments(X.columns() + 1);
thrust::sequence(segments.begin(), segments.end(), 0, static_cast<int>(X.rows()));
// Determine temporary device storage requirements
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
int cols = static_cast<int>(X.columns());
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
// Allocate temporary storage
safe_cuda(cudaMalloc(&d_temp_storage, temp_storage_bytes));
// Run sum-reduction
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
safe_cuda(cudaFree(d_temp_storage));
}
/**
* Utility to reverse q to show most import k to least important k
*
* @param Q
* @param QReversed
* @param context
*/
void col_reverse_q(const Matrix<float> &Q, Matrix<float> &QReversed, DeviceContext &context){
auto n = Q.columns();
auto m = Q.rows();
auto k = QReversed.rows();
auto d_q = Q.data();
auto d_q_reversed = QReversed.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+QReversed.size(), [=]__device__(int idx){
int dest_row = idx % m;
int dest_col = idx/m;
int src_row = dest_row;
int src_col = (n - dest_col) - 1;
d_q_reversed[idx] = d_q[src_col * m + src_row];
} );
}
/**
* Truncate Q transpose to top k
*
* @param Qt
* @param QtTrunc
* @param context
*/
void row_reverse_trunc_q(const Matrix<float> &Qt, Matrix<float> &QtTrunc, DeviceContext &context){
auto m = Qt.rows();
auto k = QtTrunc.rows();
auto d_q = Qt.data();
auto d_q_trunc = QtTrunc.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+QtTrunc.size(), [=]__device__(int idx){
int dest_row = idx % k;
int dest_col = idx / k;
int src_row = (m - dest_row) - 1;
int src_col = dest_col;
float q = d_q[src_col * m + src_row];
d_q_trunc[idx] = q;
} );
}
/**
* Calculate the U matrix, which is defined as:
* U = A*V/sigma, where A is our X matrix, V is Q, and sigma_i is the singular value w_i (each column i of A*V is scaled by 1/w_i)
*
* @param X
* @param Q
* @param w
* @param U
* @param context
*/
void calculate_u(const Matrix<float> &X, const Matrix<float> &Q, const Matrix<float> &w, Matrix<float> &U, DeviceContext &context){
multiply(X, Q, U, context, false, false, 1.0f); //A*V
auto d_u = U.data();
auto d_sigma = w.data();
auto column_size = U.rows();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+U.size(), [=]__device__(int idx){
int column = idx/column_size;
float sigma = d_sigma[column];
float u = d_u[idx];
if(sigma != 0.0){
d_u[idx] = u * 1.0/sigma;
} else{
d_u[idx] = 0.0;
}
} );
}
/**
* Obtain SVD attributes, which are as follows:
* 1.Singular Values
* 2.U matrix
* 3.Explained Variance
* 4.Explained Variance Ratio
*/
void get_tsvd_attr(Matrix<float> &X, Matrix<float> &Q, double* _Q, Matrix<float> &w, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param, DeviceContext &context){
//Obtain Q^T to obtain vector as row major order
Matrix<float>Qt(Q.columns(), Q.rows());
transpose(Q, Qt, context); //Needed for calculate_u()
Matrix<float>QtTrunc(_param.k, Qt.columns());
row_reverse_trunc_q(Qt, QtTrunc, context);
QtTrunc.copy_to_host(_Q); //Send to host
//Obtain square root of eigenvalues, which are singular values
w.transform([=]__device__(float elem){
if(elem > 0.0){
return std::sqrt(elem);
}else{
return 0.0f;
}
}
);
//Sort from biggest singular value to smallest
std::vector<double> w_temp(w.size());
w.copy_to_host(w_temp.data()); //Send to host
std::reverse(w_temp.begin(), w_temp.end());
std::copy(w_temp.begin(), w_temp.begin() + _param.k, _w);
Matrix<float>sigma(_param.k, 1);
sigma.copy(w_temp.data());
//Get U matrix
Matrix<float>U(X.rows(), _param.k);
Matrix<float>QReversed(Q.rows(), Q.columns());
col_reverse_q(Q, QReversed, context);
calculate_u(X, QReversed, sigma, U, context);
U.copy_to_host(_U); //Send to host
//Explained Variance
Matrix<float>UmultSigma(U.rows(), U.columns());
//U * Sigma
multiply_diag(U, sigma, UmultSigma, context, false);
Matrix<float>UOnesSigma(UmultSigma.rows(), 1);
UOnesSigma.fill(1.0f);
Matrix<float>USigmaVar(_param.k, 1);
Matrix<float>USigmaColMean(_param.k, 1);
multiply(UmultSigma, UOnesSigma, USigmaColMean, context, true, false, 1.0f);
float m_usigma = UmultSigma.rows();
multiply(USigmaColMean, 1/m_usigma, context);
calc_var_numerator(UmultSigma, USigmaColMean, USigmaVar, context);
multiply(USigmaVar, 1/m_usigma, context);
USigmaVar.copy_to_host(_explained_variance);
//Explained Variance Ratio
//Set aside matrix of 1's for getting sum of columnar variances
Matrix<float>XmultOnes(X.rows(), 1);
XmultOnes.fill(1.0f);
Matrix<float>XVar(X.columns(), 1);
Matrix<float>XColMean(X.columns(), 1);
multiply(X, XmultOnes, XColMean, context, true, false, 1.0f);
float m = X.rows();
multiply(XColMean, 1/m, context);
calc_var_numerator(X, XColMean, XVar, context);
multiply(XVar, 1/m, context);
Matrix<float>XVarSum(1,1);
multiply(XVar, XmultOnes, XVarSum, context, true, false, 1.0f);
Matrix<float>ExplainedVarRatio(_param.k, 1);
divide(USigmaVar, XVarSum, ExplainedVarRatio, context);
ExplainedVarRatio.copy_to_host(_explained_variance_ratio);
}
/**
* Conduct truncated svd using cusolverDnSsyevd
*
* @param X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
void cusolver_tsvd(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param){
//Allocate matrix for X^TX
Matrix<float>XtX(_param.X_n, _param.X_n);
//Create context
DeviceContext context;
//Multiply X and Xt and output result to XtX
multiply(X, X, XtX, context, true, false, 1.0f);
//Set up Q (V^T) and w (singular value) matrices (w is a matrix of size Q.rows() by 1; really just a vector)
Matrix<float>Q(XtX.rows(), XtX.columns()); // n X n -> V^T
Matrix<float>w(Q.rows(), 1);
calculate_eigen_pairs_exact(XtX, Q, w, context);
//Get tsvd attributes
get_tsvd_attr(X, Q, _Q, w, _w, _U, _explained_variance, _explained_variance_ratio, _param, context);
}
/**
* Conduct truncated svd using the power method
*
* @param X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
void power_tsvd(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param){
//Allocate matrix for X^TX
Matrix<float>M(_param.X_n, _param.X_n);
//Allocate matrix to be used in cublasSger outer product calculation
Matrix<float>A(M.rows(), M.columns());
//Create context
DeviceContext context;
//Multiply X and Xt and output result to XtX
multiply(X, X, M, context, true, false, 1.0f);
//Set up Q (V^T) and w (singular value) matrices (w is a matrix of size _param.k by 1; really just a vector)
Matrix<float>Q(M.rows(), _param.k);
Matrix<float>w(_param.k, 1);
std::vector<double> w_temp(w.size());
/* Power Method for finding all eigenvectors/eigen values
* Logic:
*
* Since the matrix is symmetric, there exists an orthogonal basis of eigenvectors. Once you have found an eigenvector, extend it to an orthogonal basis,
* rewrite the matrix in terms of this basis and restrict to the orthogonal space of the known eigenvector.
* This is comprised in the method of deflation:
* You first find the first eigenvector v1 (with a maximal λ1), by iterating xn+1=Axn/|Axn|with a "random" initial x0. Once you have found a good approximation
* for v1, you consider B=A−λ1|v1|2 * v1vT (this simple step replaces the "rewrite in terms of this basis" above). This is a matrix that behaves like A for anything
* orthogonal to v1 and zeroes out v1. Use the power method on B again, which will reveal v2, an eigenvector of a largest eigenvalue of B.
* Then switch to C=B−λ2|v2|2 * v2vT so on.
*/
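//In symbols, each sweep below computes b_k1 = M*b_k, estimates the eigenvalue as
//lambda ~= dot(M*b_k, b_k) (b_k is kept normalised), and stops once the estimate changes
//by less than tol in relative terms; deflation then removes the found pair via
//M <- M - lambda * b_k * b_k^T (outer_product + subtract) before the next iteration of i.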
Matrix<float>b_k(_param.X_n, 1);
Matrix<float>b_k1(_param.X_n, 1);
for(int i = 0; i < _param.k; i ++){
//Set aside vector of randoms (n x 1)
b_k.random(i);
float previous_eigenvalue_estimate = FLT_MAX;
float eigen_value_estimate = FLT_MAX;
while(true){
multiply(M, b_k, b_k1, context);
cublasSdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1.0, b_k.data(), 1.0, &eigen_value_estimate);
if(std::abs(eigen_value_estimate - previous_eigenvalue_estimate) <= (_param.tol * std::abs(previous_eigenvalue_estimate))) {
break;
}
normalize_vector_cublas(b_k1, context);
b_k.copy(b_k1);
previous_eigenvalue_estimate = eigen_value_estimate;
}
//Obtain eigen value
w_temp[i] = eigen_value_estimate;
//Put eigen vector into Q (starting at last column of Q)
thrust::copy(b_k.dptr(), b_k.dptr()+b_k.size(), Q.dptr()+Q.rows()*(Q.columns()-i-1));
//Get rid of eigen effect from original matrix (deflation)
multiply(A, 0.0, context);
outer_product(A, eigen_value_estimate, b_k, b_k, context);
subtract(M, A, M, context);
}
//Fill in w from vector w_temp
std::reverse(w_temp.begin(), w_temp.end());
w.copy(w_temp.data());
//Get tsvd attributes
get_tsvd_attr(X, Q, _Q, w, _w, _U, _explained_variance, _explained_variance_ratio, _param, context);
}
/**
* Conduct truncated SVD on a matrix
*
* @param _X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
void truncated_svd(const double* _X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param)
{
Matrix<float>X(_param.X_m, _param.X_n);
X.copy(_X);
truncated_svd_matrix(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param);
}
void truncated_svd_matrix(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param)
{
std::string algorithm(_param.algorithm);
try
{
if(algorithm == "cusolver"){
cusolver_tsvd(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param);
} else {
power_tsvd(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param);
}
}
catch (const std::exception &e)
{
std::cerr << "tsvd error: " << e.what() << "\n";
}
catch (std::string e)
{
std::cerr << "tsvd error: " << e << "\n";
}
catch (...)
{
std::cerr << "tsvd error\n";
}
}
}
|
78db71a1c3c0d4d8838f25bb8c810a22b66b6140.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addValue_i32 (int* vector, int value, int* output, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = vector[idx] + value;
}
} | 78db71a1c3c0d4d8838f25bb8c810a22b66b6140.cu | #include "includes.h"
__global__ void addValue_i32 (int* vector, int value, int* output, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = vector[idx] + value;
}
} |
03cc977a20fe291ae594e392c2edf603f269cbe0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Sorted segments ops implementations
template <typename T, typename I>
static bool segmentIndicesValidate_(NDArray* indices, NDArray& aexpected, NDArray& aoutput) {
return true;
}
bool segmentIndicesValidate(nd4j::LaunchContext* context , NDArray* indices, NDArray& expected, NDArray& output) {
BUILD_DOUBLE_SELECTOR(output.dataType(), indices->dataType(), return segmentIndicesValidate_, (indices, expected, output), NUMERIC_TYPES, INDEXING_TYPES);
}
// -------------------------------------------------------------------------------------------------------------- //
// Unsorted segment ops functors implementation
// -------------------------------------------------------------------------------------------------------------- //
template <typename I>
static __global__ void unsortedSegmentIndexValidateKernel(I* indices, Nd4jLong* indicesShape, I expected, I* found) {
__shared__ bool onlyTrue;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
onlyTrue = true;
len = shape::length(indicesShape);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = gridDim.x * blockDim.x;
for (int e = start; e < len && onlyTrue; e += step) {
nd4j::math::atomics::nd4j_atomicMax(found, indices[e]);
if (expected < *found)
onlyTrue = false;
}
}
template <typename I>
static bool unsortedSegmentIndicesValidate_(nd4j::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) {
output = expected;
I found = output;
I exp = expected;
auto stream = context->getCudaStream();
I* devFound;
hipMalloc(&devFound, sizeof(I));
hipMemcpy(devFound, &found, sizeof(I), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( unsortedSegmentIndexValidateKernel<I>), dim3(1), dim3(indices->lengthOf()), 128, *stream, reinterpret_cast<I*>(indices->specialBuffer()), indices->specialShapeInfo(), exp, devFound);
hipMemcpy(&found, devFound, sizeof(I), hipMemcpyDeviceToHost);
hipFree(devFound);
output = found;
return expected == output;
}
bool unsortedSegmentIndicesValidate(nd4j::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) {
BUILD_SINGLE_SELECTOR(indices->dataType(), return unsortedSegmentIndicesValidate_, (context, indices, expected, output), INDEXING_TYPES);
}
// -------------------------------------------------------------------------------------------------------------- //
// -------------------------------------------------------------------------------------------------------------- //
// fill up segment starts and lengths - split, ordered case
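// Each index value is treated as a class id: atomicMin records the first position where a
// class occurs (its segment start, assuming the starts buffer was pre-filled with a large
// sentinel by the caller) and atomicAdd counts its occurrences (its segment length).
// E.g. sorted indices [0, 0, 1, 2, 2] give starts = [0, 2, 3] and lengths = [2, 1, 2].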
template <typename I>
static __global__ void fillUpSegmentsKernel(void* indices, Nd4jLong* indexShape, int numClasses, int* classesRangesStart, int* classesRangesLenghts) {
__shared__ I* idxBuf;
__shared__ Nd4jLong idxLen;
__shared__ int* result;
if (threadIdx.x == 0) {
idxBuf = reinterpret_cast<I*>(indices);
idxLen = shape::length(indexShape);
}
__syncthreads();
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto j = tid; j < idxLen; j += step) {
auto pos = idxBuf[j];
nd4j::math::atomics::nd4j_atomicMin<int>(&classesRangesStart[pos], (int)j);
nd4j::math::atomics::nd4j_atomicAdd<int>(&classesRangesLenghts[pos], 1);
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename I>
static void fillUpSegments_(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) {
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
int* begins = reinterpret_cast<int*>(classesRangesBegs.getSpecialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.getSpecialBuffer());
auto stream = classesRangesBegs.getContext()->getCudaStream();
hipLaunchKernelGGL(( fillUpSegmentsKernel<I>), dim3(dims.x), dim3(dims.y), dims.z, *stream , indices->specialBuffer(), indices->specialShapeInfo(), numClasses, begins, lengths);
}
// -------------------------------------------------------------------------------------------------------------- //
void fillUpSegments(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) {
BUILD_SINGLE_SELECTOR(indices->dataType(), fillUpSegments_, (indices, numClasses, classesRangesBegs, classesRangesLens), INDEXING_TYPES);
}
// -------------------------------------------------------------------------------------------------------------- //
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// -------------------------------------------------------------------------------------------------------------- //
| 03cc977a20fe291ae594e392c2edf603f269cbe0.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Sorted segments ops implementations
template <typename T, typename I>
static bool segmentIndicesValidate_(NDArray* indices, NDArray& aexpected, NDArray& aoutput) {
return true;
}
bool segmentIndicesValidate(nd4j::LaunchContext* context , NDArray* indices, NDArray& expected, NDArray& output) {
BUILD_DOUBLE_SELECTOR(output.dataType(), indices->dataType(), return segmentIndicesValidate_, (indices, expected, output), NUMERIC_TYPES, INDEXING_TYPES);
}
// -------------------------------------------------------------------------------------------------------------- //
// Unsorted segment ops functors implementation
// -------------------------------------------------------------------------------------------------------------- //
template <typename I>
static __global__ void unsortedSegmentIndexValidateKernel(I* indices, Nd4jLong* indicesShape, I expected, I* found) {
__shared__ bool onlyTrue;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
onlyTrue = true;
len = shape::length(indicesShape);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = gridDim.x * blockDim.x;
for (int e = start; e < len && onlyTrue; e += step) {
nd4j::math::atomics::nd4j_atomicMax(found, indices[e]);
if (expected < *found)
onlyTrue = false;
}
}
template <typename I>
static bool unsortedSegmentIndicesValidate_(nd4j::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) {
output = expected;
I found = output;
I exp = expected;
auto stream = context->getCudaStream();
I* devFound;
cudaMalloc(&devFound, sizeof(I));
cudaMemcpy(devFound, &found, sizeof(I), cudaMemcpyHostToDevice);
unsortedSegmentIndexValidateKernel<I><<<1, indices->lengthOf(), 128, *stream>>>(reinterpret_cast<I*>(indices->specialBuffer()), indices->specialShapeInfo(), exp, devFound);
cudaMemcpy(&found, devFound, sizeof(I), cudaMemcpyDeviceToHost);
cudaFree(devFound);
output = found;
return expected == output;
}
bool unsortedSegmentIndicesValidate(nd4j::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) {
BUILD_SINGLE_SELECTOR(indices->dataType(), return unsortedSegmentIndicesValidate_, (context, indices, expected, output), INDEXING_TYPES);
}
// -------------------------------------------------------------------------------------------------------------- //
// -------------------------------------------------------------------------------------------------------------- //
// fill up segment starts and lengths - split, ordered case
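// Each index value is treated as a class id: atomicMin records the first position where a
// class occurs (its segment start, assuming the starts buffer was pre-filled with a large
// sentinel by the caller) and atomicAdd counts its occurrences (its segment length).
// E.g. sorted indices [0, 0, 1, 2, 2] give starts = [0, 2, 3] and lengths = [2, 1, 2].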
template <typename I>
static __global__ void fillUpSegmentsKernel(void* indices, Nd4jLong* indexShape, int numClasses, int* classesRangesStart, int* classesRangesLenghts) {
__shared__ I* idxBuf;
__shared__ Nd4jLong idxLen;
__shared__ int* result;
if (threadIdx.x == 0) {
idxBuf = reinterpret_cast<I*>(indices);
idxLen = shape::length(indexShape);
}
__syncthreads();
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto j = tid; j < idxLen; j += step) {
auto pos = idxBuf[j];
nd4j::math::atomics::nd4j_atomicMin<int>(&classesRangesStart[pos], (int)j);
nd4j::math::atomics::nd4j_atomicAdd<int>(&classesRangesLenghts[pos], 1);
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename I>
static void fillUpSegments_(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) {
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
int* begins = reinterpret_cast<int*>(classesRangesBegs.getSpecialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.getSpecialBuffer());
auto stream = classesRangesBegs.getContext()->getCudaStream();
fillUpSegmentsKernel<I><<<dims.x, dims.y, dims.z, *stream >>>(indices->specialBuffer(), indices->specialShapeInfo(), numClasses, begins, lengths);
}
// -------------------------------------------------------------------------------------------------------------- //
void fillUpSegments(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) {
BUILD_SINGLE_SELECTOR(indices->dataType(), fillUpSegments_, (indices, numClasses, classesRangesBegs, classesRangesLens), INDEXING_TYPES);
}
// -------------------------------------------------------------------------------------------------------------- //
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// -------------------------------------------------------------------------------------------------------------- //
|
64ac5cab473416223688e41a6988f600dcb32382.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "CUDADiffusion.hh"
//#include "DiffusionUtils.hh"
//#include "SymmetricTensor.hh"
//#include <vector>
//#include <map>
//#include "options.h"
//#include "cudautil.h"
#include <stdio.h>
#define XTILE 20
typedef double Real;
__global__ void diff_6face_v1(const Real* d_psi, Real* d_npsi, const Real* d_sigmaX, const Real* d_sigmaY, const Real* d_sigmaZ,int Lii, int Ljj, int Lkk)
{
//map z dir to threads
//z is the fastest varying direction
//2d decomposition
//32x32 in y z direction
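//the kernel marches along x holding three 32x32 psi planes (previous/current/next) in
//shared memory, rotated through the pii/cii/nii indices; the fourth plane is reused as
//scratch to pass y/z face charges to the neighbouring thread before they are subtracted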
__shared__ Real sm_psi[4][32][32]; //32 KB
#define V0(y,z) sm_psi[pii][y][z]
#define V1(y,z) sm_psi[cii][y][z]
#define V2(y,z) sm_psi[nii][y][z]
#define sigmaX(x,y,z,dir) d_sigmaX[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
#define sigmaY(x,y,z,dir) d_sigmaY[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
#define sigmaZ(x,y,z,dir) d_sigmaZ[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
#define psi(x,y,z) d_psi[ z + Lkk * ( (y) + Ljj * (x) ) ]
#define npsi(x,y,z) d_npsi[ z + Lkk * ( (y) + Ljj * (x) ) ]
int tjj = threadIdx.y;
int tkk = threadIdx.x;
//shift for each tile
// d_psi += 30 * blockIdx.x + Lkk * ( 30 * blockIdx.y );
// d_npsi += 30 * blockIdx.x + Lkk * ( 30 * blockIdx.y );
d_psi = &(psi(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z));
d_npsi = &(npsi(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z));
d_sigmaX = &(sigmaX(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z, 0));
d_sigmaY = &(sigmaY(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z, 0));
d_sigmaZ = &(sigmaZ(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z, 0));
int Last_x=XTILE+1; int nLast_y=31; int nLast_z=31;
if (blockIdx.x == gridDim.x-1) Last_x = Lii-2 - XTILE * blockIdx.x + 1;
if (blockIdx.y == gridDim.y-1) nLast_y = Ljj-2 - 30 * blockIdx.y + 1;
if (blockIdx.z == gridDim.z-1) nLast_z = Lkk-2 - 30 * blockIdx.z + 1;
// if (blockIdx.x==0 && blockIdx.y==0 && blockIdx.z==0) printf("b(%d,%d,%d) t(%d,%d,%d) LastX:%d nLast_y:%d nLast_z:%d %p %p %p %p %p\n",blockIdx.x,blockIdx.y,blockIdx.z,threadIdx.x,threadIdx.y,threadIdx.z,Last_x,nLast_y,nLast_z,&(psi(0,tjj,tkk)),&(npsi(0,tjj,tkk)),&(sigmaX(0,tjj,tkk,0)),&(sigmaY(0,tjj,tkk,0)),&(sigmaZ(0,tjj,tkk,0)));
if(tjj>nLast_y) return;
if(tkk>nLast_z) return;
// d_sigmaX += 30 * blockIdx.x + (Lkk-2) * ( 31 * blockIdx.y );
// d_sigmaY += 30 * blockIdx.x + (Lkk-2) * ( 31 * blockIdx.y );
// d_sigmaZ += 31 * blockIdx.x + (Lkk-1) * ( 31 * blockIdx.y );
// printf("tjj tkk bx by = %d %d %d %d\n",tjj,tkk,blockIdx.x,blockIdx.y);
int pii,cii,nii,tii;
pii=0; cii=1; nii=2;
sm_psi[cii][tjj][tkk] = psi(0,tjj,tkk);
sm_psi[nii][tjj][tkk] = psi(1,tjj,tkk);
Real xcharge,ycharge,zcharge,dV;
__syncthreads();
//initial
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
{
Real xd=-V1(tjj,tkk) + V2(tjj,tkk);
Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.;
Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.;
#ifdef flux_dump
printf("x=%d y=%d z=%d xflux=(%f,%f,%f)\n",XTILE*blockIdx.x,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
dV = 0;
dV -= sigmaX(0,tjj,tkk,0) * xd + sigmaX(0,tjj,tkk,1) * yd + sigmaX(0,tjj,tkk,2) * zd ;
}
tii=pii; pii=cii; cii=nii; nii=tii;
for(int ii=1;ii<Last_x;ii++)
{
sm_psi[nii][tjj][tkk] = psi(ii+1,tjj,tkk);
__syncthreads();
// contribution to (ii-1)
// use link loaded previous
// y face current
// tjj=0 calc face at 0-1 and tjj=30 calc face at 30-31
if ((tkk>0) && (tkk<nLast_z) && (tjj<nLast_y))
{
Real xd=(-V0(tjj,tkk) - V0(1 + tjj,tkk) + V2(tjj,tkk) + V2(1 + tjj,tkk))/4.;
Real yd=-V1(tjj,tkk) + V1(1 + tjj,tkk);
Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V1(1 + tjj,-1 + tkk) + V1(1 + tjj,1 + tkk))/4.;
#ifdef flux_dump
printf("x=%d y=%d z=%d yflux=(%f,%f,%f)\n",XTILE*blockIdx.x+ii,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
ycharge = sigmaY(ii,tjj,tkk,0) * xd + sigmaY(ii,tjj,tkk,1) * yd + sigmaY(ii,tjj,tkk,2) * zd ;
dV += ycharge;
sm_psi[3][tjj][tkk]=ycharge;
}
__syncthreads();
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
dV -= sm_psi[3][tjj-1][tkk]; //bring from left
__syncthreads();
// z face current
// tkk=0 calc face at 0-1 and tkk=30 calc face at 30-31
if ((tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
{
Real xd=(-V0(tjj,tkk) - V0(tjj,1 + tkk) + V2(tjj,tkk) + V2(tjj,1 + tkk))/4.;
Real yd=(-V1(-1 + tjj,tkk) - V1(-1 + tjj,1 + tkk) + V1(1 + tjj,tkk) + V1(1 + tjj,1 + tkk))/4.;
Real zd=-V1(tjj,tkk) + V1(tjj,1 + tkk);
#ifdef flux_dump
printf("x=%d y=%d z=%d zflux=(%f,%f,%f)\n",XTILE*blockIdx.x+ii,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
zcharge = sigmaZ(ii,tjj,tkk,0) * xd + sigmaZ(ii,tjj,tkk,1) * yd + sigmaZ(ii,tjj,tkk,2) * zd ;
dV += zcharge;
sm_psi[3][tjj][tkk]=zcharge;
}
__syncthreads();
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
dV -= sm_psi[3][tjj][tkk-1];
//__syncthreads();
// x face current
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
{
Real xd=-V1(tjj,tkk) + V2(tjj,tkk);
Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.;
Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.;
#ifdef flux_dump
printf("x=%d y=%d z=%d xflux=(%f,%f,%f)\n",XTILE*blockIdx.x+ii,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
xcharge = sigmaX(ii,tjj,tkk,0) * xd + sigmaX(ii,tjj,tkk,1) * yd + sigmaX(ii,tjj,tkk,2) * zd ;
dV += xcharge;
//store dV
npsi(ii,tjj,tkk) = dV;
dV = -xcharge; //pass to the next cell in x-dir
}
tii=pii; pii=cii; cii=nii; nii=tii;
}
// #undef V0(y,z)
// #undef V1(y,z)
// #undef V2(y,z)
// #undef sigmaX(x,y,z,dir)
// #undef sigmaY(x,y,z,dir)
// #undef sigmaZ(x,y,z,dir)
// #undef psi(x,y,z)
// #undef npsi(x,y,z)
}
__global__ void map_dVm(double * dVmT, double* dVm, const int *remap,int nCells)
{
int idx0 = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x * gridDim.x;
for(int idx = idx0 ; idx<nCells ; idx+=stride)
dVmT[idx] = dVm[remap[idx]];
}
//__global__ void map_V(double * VT, double* V, const int *remap,int nCells)
//{
// int idx0 = threadIdx.x + blockDim.x*blockIdx.x;
// int stride = blockDim.x * gridDim.x;
// for(int idx = idx0 ; idx<nCells ; idx+=stride)
// VT[remap[idx]] = V[idx];
//}
extern "C"
{
void call_cuda_kernels(const Real *VmRaw, Real *dVmRaw, const Real *sigmaRaw, int nx, int ny, int nz, Real *dVmOut, const int *lookup,int nCells)
{
//determine block dim
//1. blockdim.z and blockdim.y are determined in a simple way.
int bdimz = (int)((nz-2)/30) + ((nz-2)%30==0?0:1);
int bdimy = (int)((ny-2)/30) + ((ny-2)%30==0?0:1);
int bdimx = (int)((nx-2)/XTILE) + ((nx-2)%XTILE==0?0:1);
// printf("Vm=%p dVm=%p sigma=%p \n",VmRaw,dVmRaw,sigmaRaw);
// printf("call_cuda_kernels %d,%d,%d (%d,%d,%d)\n",nx,ny,nz,bdimx,bdimy,bdimz);
#ifdef GPU_SM_70
hipFuncSetAttribute(diff_6face_v1, hipFuncAttributePreferredSharedMemoryCarveout, 50);
#endif
//map_V<<<112,512>>>(VmBlockRaw,VmRaw,lookup,nCells);
hipLaunchKernelGGL(( diff_6face_v1), dim3(dim3(bdimx,bdimy,bdimz)),dim3(dim3(32,32,1)), 0, 0, VmRaw,dVmRaw,sigmaRaw,sigmaRaw+3*nx*ny*nz,sigmaRaw+6*nx*ny*nz,nx,ny,nz);
hipLaunchKernelGGL(( map_dVm), dim3(112),dim3(512), 0, 0, dVmRaw,dVmOut,lookup,nCells);
}
}
| 64ac5cab473416223688e41a6988f600dcb32382.cu | //#include "CUDADiffusion.hh"
//#include "DiffusionUtils.hh"
//#include "SymmetricTensor.hh"
//#include <vector>
//#include <map>
//#include "options.h"
//#include "cudautil.h"
#include <stdio.h>
#define XTILE 20
typedef double Real;
__global__ void diff_6face_v1(const Real* d_psi, Real* d_npsi, const Real* d_sigmaX, const Real* d_sigmaY, const Real* d_sigmaZ,int Lii, int Ljj, int Lkk)
{
//map z dir to threads
//z is the fastest varying direction
//2d decomposition
//32x32 in y z direction
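//the kernel marches along x holding three 32x32 psi planes (previous/current/next) in
//shared memory, rotated through the pii/cii/nii indices; the fourth plane is reused as
//scratch to pass y/z face charges to the neighbouring thread before they are subtracted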
__shared__ Real sm_psi[4][32][32]; //32 KB
#define V0(y,z) sm_psi[pii][y][z]
#define V1(y,z) sm_psi[cii][y][z]
#define V2(y,z) sm_psi[nii][y][z]
#define sigmaX(x,y,z,dir) d_sigmaX[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
#define sigmaY(x,y,z,dir) d_sigmaY[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
#define sigmaZ(x,y,z,dir) d_sigmaZ[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
#define psi(x,y,z) d_psi[ z + Lkk * ( (y) + Ljj * (x) ) ]
#define npsi(x,y,z) d_npsi[ z + Lkk * ( (y) + Ljj * (x) ) ]
int tjj = threadIdx.y;
int tkk = threadIdx.x;
//shift for each tile
// d_psi += 30 * blockIdx.x + Lkk * ( 30 * blockIdx.y );
// d_npsi += 30 * blockIdx.x + Lkk * ( 30 * blockIdx.y );
d_psi = &(psi(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z));
d_npsi = &(npsi(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z));
d_sigmaX = &(sigmaX(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z, 0));
d_sigmaY = &(sigmaY(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z, 0));
d_sigmaZ = &(sigmaZ(XTILE*blockIdx.x, 30*blockIdx.y, 30*blockIdx.z, 0));
int Last_x=XTILE+1; int nLast_y=31; int nLast_z=31;
if (blockIdx.x == gridDim.x-1) Last_x = Lii-2 - XTILE * blockIdx.x + 1;
if (blockIdx.y == gridDim.y-1) nLast_y = Ljj-2 - 30 * blockIdx.y + 1;
if (blockIdx.z == gridDim.z-1) nLast_z = Lkk-2 - 30 * blockIdx.z + 1;
// if (blockIdx.x==0 && blockIdx.y==0 && blockIdx.z==0) printf("b(%d,%d,%d) t(%d,%d,%d) LastX:%d nLast_y:%d nLast_z:%d %p %p %p %p %p\n",blockIdx.x,blockIdx.y,blockIdx.z,threadIdx.x,threadIdx.y,threadIdx.z,Last_x,nLast_y,nLast_z,&(psi(0,tjj,tkk)),&(npsi(0,tjj,tkk)),&(sigmaX(0,tjj,tkk,0)),&(sigmaY(0,tjj,tkk,0)),&(sigmaZ(0,tjj,tkk,0)));
if(tjj>nLast_y) return;
if(tkk>nLast_z) return;
// d_sigmaX += 30 * blockIdx.x + (Lkk-2) * ( 31 * blockIdx.y );
// d_sigmaY += 30 * blockIdx.x + (Lkk-2) * ( 31 * blockIdx.y );
// d_sigmaZ += 31 * blockIdx.x + (Lkk-1) * ( 31 * blockIdx.y );
// printf("tjj tkk bx by = %d %d %d %d\n",tjj,tkk,blockIdx.x,blockIdx.y);
int pii,cii,nii,tii;
pii=0; cii=1; nii=2;
sm_psi[cii][tjj][tkk] = psi(0,tjj,tkk);
sm_psi[nii][tjj][tkk] = psi(1,tjj,tkk);
Real xcharge,ycharge,zcharge,dV;
__syncthreads();
//initial
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
{
Real xd=-V1(tjj,tkk) + V2(tjj,tkk);
Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.;
Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.;
#ifdef flux_dump
printf("x=%d y=%d z=%d xflux=(%f,%f,%f)\n",XTILE*blockIdx.x,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
dV = 0;
dV -= sigmaX(0,tjj,tkk,0) * xd + sigmaX(0,tjj,tkk,1) * yd + sigmaX(0,tjj,tkk,2) * zd ;
}
tii=pii; pii=cii; cii=nii; nii=tii;
for(int ii=1;ii<Last_x;ii++)
{
sm_psi[nii][tjj][tkk] = psi(ii+1,tjj,tkk);
__syncthreads();
// contribution to (ii-1)
// use link loaded previous
// y face current
// tjj=0 calc face at 0-1 and tjj=30 calc face at 30-31
if ((tkk>0) && (tkk<nLast_z) && (tjj<nLast_y))
{
Real xd=(-V0(tjj,tkk) - V0(1 + tjj,tkk) + V2(tjj,tkk) + V2(1 + tjj,tkk))/4.;
Real yd=-V1(tjj,tkk) + V1(1 + tjj,tkk);
Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V1(1 + tjj,-1 + tkk) + V1(1 + tjj,1 + tkk))/4.;
#ifdef flux_dump
printf("x=%d y=%d z=%d yflux=(%f,%f,%f)\n",XTILE*blockIdx.x+ii,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
ycharge = sigmaY(ii,tjj,tkk,0) * xd + sigmaY(ii,tjj,tkk,1) * yd + sigmaY(ii,tjj,tkk,2) * zd ;
dV += ycharge;
sm_psi[3][tjj][tkk]=ycharge;
}
__syncthreads();
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
dV -= sm_psi[3][tjj-1][tkk]; //bring from left
__syncthreads();
// z face current
// tkk=0 calc face at 0-1 and tkk=30 calc face at 30-31
if ((tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
{
Real xd=(-V0(tjj,tkk) - V0(tjj,1 + tkk) + V2(tjj,tkk) + V2(tjj,1 + tkk))/4.;
Real yd=(-V1(-1 + tjj,tkk) - V1(-1 + tjj,1 + tkk) + V1(1 + tjj,tkk) + V1(1 + tjj,1 + tkk))/4.;
Real zd=-V1(tjj,tkk) + V1(tjj,1 + tkk);
#ifdef flux_dump
printf("x=%d y=%d z=%d zflux=(%f,%f,%f)\n",XTILE*blockIdx.x+ii,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
zcharge = sigmaZ(ii,tjj,tkk,0) * xd + sigmaZ(ii,tjj,tkk,1) * yd + sigmaZ(ii,tjj,tkk,2) * zd ;
dV += zcharge;
sm_psi[3][tjj][tkk]=zcharge;
}
__syncthreads();
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
dV -= sm_psi[3][tjj][tkk-1];
//__syncthreads();
// x face current
if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y))
{
Real xd=-V1(tjj,tkk) + V2(tjj,tkk);
Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.;
Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.;
#ifdef flux_dump
printf("x=%d y=%d z=%d xflux=(%f,%f,%f)\n",XTILE*blockIdx.x+ii,30*blockIdx.y+tjj,30*blockIdx.z+tkk,xd,yd,zd);
#endif
xcharge = sigmaX(ii,tjj,tkk,0) * xd + sigmaX(ii,tjj,tkk,1) * yd + sigmaX(ii,tjj,tkk,2) * zd ;
dV += xcharge;
//store dV
npsi(ii,tjj,tkk) = dV;
dV = -xcharge; //pass to the next cell in x-dir
}
tii=pii; pii=cii; cii=nii; nii=tii;
}
// #undef V0(y,z)
// #undef V1(y,z)
// #undef V2(y,z)
// #undef sigmaX(x,y,z,dir)
// #undef sigmaY(x,y,z,dir)
// #undef sigmaZ(x,y,z,dir)
// #undef psi(x,y,z)
// #undef npsi(x,y,z)
}
__global__ void map_dVm(double * dVmT, double* dVm, const int *remap,int nCells)
{
int idx0 = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x * gridDim.x;
for(int idx = idx0 ; idx<nCells ; idx+=stride)
dVmT[idx] = dVm[remap[idx]];
}
//__global__ void map_V(double * VT, double* V, const int *remap,int nCells)
//{
// int idx0 = threadIdx.x + blockDim.x*blockIdx.x;
// int stride = blockDim.x * gridDim.x;
// for(int idx = idx0 ; idx<nCells ; idx+=stride)
// VT[remap[idx]] = V[idx];
//}
extern "C"
{
void call_cuda_kernels(const Real *VmRaw, Real *dVmRaw, const Real *sigmaRaw, int nx, int ny, int nz, Real *dVmOut, const int *lookup,int nCells)
{
//determine block dim
//1. blockdim.z and blockdim.y are determined in a simple way.
int bdimz = (int)((nz-2)/30) + ((nz-2)%30==0?0:1);
int bdimy = (int)((ny-2)/30) + ((ny-2)%30==0?0:1);
int bdimx = (int)((nx-2)/XTILE) + ((nx-2)%XTILE==0?0:1);
// printf("Vm=%p dVm=%p sigma=%p \n",VmRaw,dVmRaw,sigmaRaw);
// printf("call_cuda_kernels %d,%d,%d (%d,%d,%d)\n",nx,ny,nz,bdimx,bdimy,bdimz);
#ifdef GPU_SM_70
cudaFuncSetAttribute(diff_6face_v1, cudaFuncAttributePreferredSharedMemoryCarveout, 50);
#endif
//map_V<<<112,512>>>(VmBlockRaw,VmRaw,lookup,nCells);
diff_6face_v1<<<dim3(bdimx,bdimy,bdimz),dim3(32,32,1)>>>(VmRaw,dVmRaw,sigmaRaw,sigmaRaw+3*nx*ny*nz,sigmaRaw+6*nx*ny*nz,nx,ny,nz);
map_dVm<<<112,512>>>(dVmRaw,dVmOut,lookup,nCells);
}
}
|
ce34b41857548ba43de188dd717b0af6122ef295.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO JSON reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/utilities/trie.cuh>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_scalar.hpp>
#include <thrust/optional.h>
using cudf::detail::host_span;
namespace cudf {
namespace io {
namespace detail {
namespace json {
using namespace cudf::io;
namespace {
/**
* @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
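* With the constants below, 10 columns would for instance yield 1024 + 10 * 64 = 1664
* bytes, while the flat fallback is 16 KB.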
*
* @param[in] num_columns Number of columns in the JSON file (optional)
*
* @return Estimated maximum size of a row, in bytes
*/
constexpr size_t calculate_max_row_size(int num_columns = 0) noexcept
{
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
return num_columns == 0
? max_row_bytes // Use flat size if the # of columns is not known
: base_padding +
num_columns * column_bytes; // Expand size based on the # of columns, if available
}
} // anonymous namespace
/**
* @brief Aggregate the table containing keys info by their hash values.
*
* @param[in] info Table with columns containing key offsets, lengths and hashes, respectively
*
* @return Table with data aggregated by key hash values
*/
std::unique_ptr<table> aggregate_keys_info(std::unique_ptr<table> info)
{
auto const info_view = info->view();
std::vector<groupby::aggregation_request> requests;
requests.emplace_back(groupby::aggregation_request{info_view.column(0)});
requests.back().aggregations.emplace_back(make_min_aggregation());
requests.back().aggregations.emplace_back(make_nth_element_aggregation(0));
requests.emplace_back(groupby::aggregation_request{info_view.column(1)});
requests.back().aggregations.emplace_back(make_min_aggregation());
requests.back().aggregations.emplace_back(make_nth_element_aggregation(0));
// Aggregate by hash values
groupby::groupby gb_obj(
table_view({info_view.column(2)}), null_policy::EXCLUDE, sorted::NO, {}, {});
auto result = gb_obj.aggregate(requests); // TODO: no stream parameter?
std::vector<std::unique_ptr<column>> out_columns;
out_columns.emplace_back(std::move(result.second[0].results[0])); // offsets
out_columns.emplace_back(std::move(result.second[1].results[0])); // lengths
out_columns.emplace_back(std::move(result.first->release()[0])); // hashes
return std::make_unique<table>(std::move(out_columns));
}
/**
* @brief Initializes the (key hash -> column index) hash map.
*/
col_map_ptr_type create_col_names_hash_map(column_view column_name_hashes, hipStream_t stream)
{
auto key_col_map{col_map_type::create(column_name_hashes.size())};
auto const column_data = column_name_hashes.data<uint32_t>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
column_name_hashes.size(),
[map = *key_col_map, column_data] __device__(size_type idx) mutable {
map.insert(thrust::make_pair(column_data[idx], idx));
});
return key_col_map;
}
/**
* @brief Create a table whose columns contain the information on JSON objects' keys.
*
* The columns contain name offsets in the file, name lengths and name hashes, respectively.
*
* @param[in] options Parsing options (e.g. delimiter and quotation character)
* @param[in] data Input JSON device data
* @param[in] row_offsets Device array of row start locations in the input buffer
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*
* @return std::unique_ptr<table> cudf table with three columns (offsets, lengths, hashes)
*/
std::unique_ptr<table> create_json_keys_info_table(const ParseOptions &options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
hipStream_t stream)
{
// Count keys
rmm::device_scalar<unsigned long long int> key_counter(0, stream);
cudf::io::json::gpu::collect_keys_info(
options, data, row_offsets, key_counter.data(), {}, stream);
// Allocate columns to store hash value, length, and offset of each JSON object key in the input
auto const num_keys = key_counter.value();
std::vector<std::unique_ptr<column>> info_columns;
info_columns.emplace_back(make_numeric_column(data_type(type_id::UINT64), num_keys));
info_columns.emplace_back(make_numeric_column(data_type(type_id::UINT16), num_keys));
info_columns.emplace_back(make_numeric_column(data_type(type_id::UINT32), num_keys));
// Create a table out of these columns to pass them around more easily
auto info_table = std::make_unique<table>(std::move(info_columns));
auto const info_table_mdv = mutable_table_device_view::create(info_table->mutable_view(), stream);
// Reset the key counter - now used for indexing
key_counter.set_value(0, stream);
// Fill the allocated columns
cudf::io::json::gpu::collect_keys_info(
options, data, row_offsets, key_counter.data(), {*info_table_mdv}, stream);
return info_table;
}
/**
 * @brief Extract the keys from the JSON file using the name offsets/lengths.
*/
std::vector<std::string> create_key_strings(char const *h_data,
table_view sorted_info,
hipStream_t stream)
{
auto const num_cols = sorted_info.num_rows();
std::vector<uint64_t> h_offsets(num_cols);
hipMemcpyAsync(h_offsets.data(),
sorted_info.column(0).data<uint64_t>(),
sizeof(uint64_t) * num_cols,
hipMemcpyDefault,
stream);
std::vector<uint16_t> h_lens(num_cols);
hipMemcpyAsync(h_lens.data(),
sorted_info.column(1).data<uint16_t>(),
sizeof(uint16_t) * num_cols,
hipMemcpyDefault,
stream);
std::vector<std::string> names(num_cols);
std::transform(h_offsets.cbegin(),
h_offsets.cend(),
h_lens.cbegin(),
names.begin(),
[&](auto offset, auto len) { return std::string(h_data + offset, len); });
return names;
}
auto sort_keys_info_by_offset(std::unique_ptr<table> info)
{
auto const agg_offset_col_view = info->get_column(0).view();
return sort_by_key(info->view(), table_view({agg_offset_col_view}));
}
/**
* @brief Extract JSON object keys from a JSON file.
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return Names of JSON object keys in the file
*/
std::pair<std::vector<std::string>, col_map_ptr_type> reader::impl::get_json_object_keys_hashes(
hipStream_t stream)
{
auto info = create_json_keys_info_table(
opts_,
device_span<char const>(static_cast<char const *>(data_.data()), data_.size()),
rec_starts_,
stream);
auto aggregated_info = aggregate_keys_info(std::move(info));
auto sorted_info = sort_keys_info_by_offset(std::move(aggregated_info));
return {create_key_strings(uncomp_data_, sorted_info->view(), stream),
create_col_names_hash_map(sorted_info->get_column(2).view(), stream)};
}
/**
* @brief Ingest input JSON file/buffer, without decompression.
*
* Sets the source_, byte_range_offset_, and byte_range_size_ data members
*
* @param[in] range_offset Number of bytes offset from the start
* @param[in] range_size Bytes to read; use `0` for all remaining data
*/
void reader::impl::ingest_raw_input(size_t range_offset, size_t range_size)
{
size_t map_range_size = 0;
if (range_size != 0) {
map_range_size = range_size + calculate_max_row_size(options_.get_dtypes().size());
}
// Support delayed opening of the file if using memory mapping datasource
// This allows only mapping of a subset of the file if using byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
if (!source_->is_empty()) {
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
buffer_ = source_->host_read(range_offset, data_size);
}
byte_range_offset_ = range_offset;
byte_range_size_ = range_size;
load_whole_file_ = byte_range_offset_ == 0 && byte_range_size_ == 0;
}
/**
* @brief Decompress the input data, if needed
*
* Sets the uncomp_data_ and uncomp_size_ data members
* Loads the data into device memory if byte range parameters are not used
*/
void reader::impl::decompress_input(hipStream_t stream)
{
const auto compression_type =
infer_compression_type(options_.get_compression(),
filepath_,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
if (compression_type == "none") {
// Do not use the owner vector here to avoid extra copy
uncomp_data_ = reinterpret_cast<const char *>(buffer_->data());
uncomp_size_ = buffer_->size();
} else {
uncomp_data_owner_ = get_uncompressed_data( //
host_span<char const>( //
reinterpret_cast<const char *>(buffer_->data()),
buffer_->size()),
compression_type);
uncomp_data_ = uncomp_data_owner_.data();
uncomp_size_ = uncomp_data_owner_.size();
}
if (load_whole_file_) data_ = rmm::device_buffer(uncomp_data_, uncomp_size_, stream);
}
/**
* @brief Finds all record starts in the file and stores them in rec_starts_
*
* Does not upload the entire file to the GPU
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
void reader::impl::set_record_starts(hipStream_t stream)
{
std::vector<char> chars_to_count{'\n'};
  // Currently, ignoring line terminations within quotes is handled by recording the positions of
  // both newlines and quote characters, and then filtering out the record starts that follow a
  // quotechar or a line termination inside a quotechar pair.
if (allow_newlines_in_strings_) { chars_to_count.push_back('\"'); }
// If not starting at an offset, add an extra row to account for the first row in the file
cudf::size_type prefilter_count = ((byte_range_offset_ == 0) ? 1 : 0);
if (load_whole_file_) {
prefilter_count += count_all_from_set(data_, chars_to_count);
} else {
prefilter_count += count_all_from_set(uncomp_data_, uncomp_size_, chars_to_count);
}
rec_starts_.resize(prefilter_count);
auto *find_result_ptr = rec_starts_.data().get();
// Manually adding an extra row to account for the first row in the file
if (byte_range_offset_ == 0) {
find_result_ptr++;
CUDA_TRY(hipMemsetAsync(rec_starts_.data().get(), 0ull, sizeof(uint64_t), stream));
}
std::vector<char> chars_to_find{'\n'};
if (allow_newlines_in_strings_) { chars_to_find.push_back('\"'); }
// Passing offset = 1 to return positions AFTER the found character
if (load_whole_file_) {
find_all_from_set(data_, chars_to_find, 1, find_result_ptr);
} else {
find_all_from_set(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr);
}
  // Previous call stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(stream), rec_starts_.begin(), rec_starts_.end());
auto filtered_count = prefilter_count;
if (allow_newlines_in_strings_) {
thrust::host_vector<uint64_t> h_rec_starts = rec_starts_;
bool quotation = false;
for (cudf::size_type i = 1; i < prefilter_count; ++i) {
if (uncomp_data_[h_rec_starts[i] - 1] == '\"') {
quotation = !quotation;
h_rec_starts[i] = uncomp_size_;
filtered_count--;
} else if (quotation) {
h_rec_starts[i] = uncomp_size_;
filtered_count--;
}
}
rec_starts_ = h_rec_starts;
thrust::sort(rmm::exec_policy()->on(stream), rec_starts_.begin(), rec_starts_.end());
}
// Exclude the ending newline as it does not precede a record start
if (uncomp_data_[uncomp_size_ - 1] == '\n') { filtered_count--; }
rec_starts_.resize(filtered_count);
}
/**
* @brief Uploads the relevant segment of the input json data onto the GPU.
*
* Sets the d_data_ data member.
* Only rows that need to be parsed are copied, based on the byte range
* Also updates the array of record starts to match the device data offset.
*
*/
void reader::impl::upload_data_to_device(hipStream_t stream)
{
size_t start_offset = 0;
size_t end_offset = uncomp_size_;
// Trim lines that are outside range
if (byte_range_size_ != 0 || byte_range_offset_ != 0) {
thrust::host_vector<uint64_t> h_rec_starts = rec_starts_;
if (byte_range_size_ != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() && *it > byte_range_size_) {
end_offset = *it;
--it;
}
h_rec_starts.erase(it + 1, h_rec_starts.end());
}
// Resize to exclude rows outside of the range
// Adjust row start positions to account for the data subcopy
start_offset = h_rec_starts.front();
rec_starts_.resize(h_rec_starts.size());
thrust::transform(rmm::exec_policy()->on(stream),
rec_starts_.begin(),
rec_starts_.end(),
thrust::make_constant_iterator(start_offset),
rec_starts_.begin(),
thrust::minus<uint64_t>());
}
const size_t bytes_to_upload = end_offset - start_offset;
CUDF_EXPECTS(bytes_to_upload <= uncomp_size_,
"Error finding the record within the specified byte range.\n");
// Upload the raw data that is within the rows of interest
data_ = rmm::device_buffer(uncomp_data_ + start_offset, bytes_to_upload, stream);
}
/**
* @brief Parse the first row to set the column name
*
* Sets the column_names_ data member
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
void reader::impl::set_column_names(hipStream_t stream)
{
// If file only contains one row, use the file size for the row size
uint64_t first_row_len = data_.size() / sizeof(char);
if (rec_starts_.size() > 1) {
// Set first_row_len to the offset of the second row, if it exists
CUDA_TRY(hipMemcpyAsync(&first_row_len,
rec_starts_.data().get() + 1,
sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream));
}
std::vector<char> first_row(first_row_len);
CUDA_TRY(hipMemcpyAsync(
first_row.data(), data_.data(), first_row_len * sizeof(char), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
// Determine the row format between:
// JSON array - [val1, val2, ...] and
// JSON object - {"col1":val1, "col2":val2, ...}
// based on the top level opening bracket
const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '[');
const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{');
CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(),
"Input data is not a valid JSON file.");
// If the first opening bracket is '{', assume object format
if (first_curly_bracket < first_square_bracket) {
// use keys as column names if input rows are objects
auto keys_desc = get_json_object_keys_hashes(stream);
metadata_.column_names = keys_desc.first;
set_column_map(std::move(keys_desc.second));
} else {
int cols_found = 0;
bool quotation = false;
for (size_t pos = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts_.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) {
metadata_.column_names.emplace_back(std::to_string(cols_found++));
}
}
}
}
/**
* @brief Set the data type array data member
*
* If user does not pass the data types, deduces types from the file content
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
void reader::impl::set_data_types(hipStream_t stream)
{
auto const dtype = options_.get_dtypes();
if (!dtype.empty()) {
CUDF_EXPECTS(dtype.size() == metadata_.column_names.size(),
"Need to specify the type of each column.\n");
// Assume that the dtype is in dictionary format only if all elements contain a colon
const bool is_dict =
std::all_of(std::cbegin(dtype), std::cend(dtype), [](const std::string &s) {
return std::find(std::cbegin(s), std::cend(s), ':') != std::cend(s);
});
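    // For illustration (hypothetical column names): {"age:int64", "price:float64"} is treated as
    // the dictionary form, while {"int64", "float64"} (no colons) is read as a positional type list.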
// When C++17, use std::string_view and CTAD
auto split_on_colon = [](auto const &s) -> std::pair<std::string, std::string> {
auto const i = s.find(":");
auto const a = s.substr(0, i);
auto const b = s.substr(i + 1);
return {a, b};
};
if (is_dict) {
std::map<std::string, data_type> col_type_map;
std::transform(std::cbegin(dtype),
std::cend(dtype),
std::inserter(col_type_map, col_type_map.end()),
[&](auto const &ts) -> std::pair<std::string, data_type> {
// When C++17, use structured bindings: auto const& [col_name, type_str] = ..
auto split = split_on_colon(ts);
return {split.first, convert_string_to_dtype(split.second)};
});
// Using the map here allows O(n log n) complexity
std::transform(std::cbegin(metadata_.column_names),
std::cend(metadata_.column_names),
std::back_inserter(dtypes_),
[&](auto const &column_name) { return col_type_map[column_name]; });
} else {
std::transform(std::cbegin(dtype),
std::cend(dtype),
std::back_inserter(dtypes_),
[](auto const &col_dtype) { return convert_string_to_dtype(col_dtype); });
}
} else {
CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n");
auto const num_columns = metadata_.column_names.size();
auto const do_set_null_count = key_to_col_idx_map_ != nullptr;
auto const h_column_infos = cudf::io::json::gpu::detect_data_types(
opts_,
device_span<char const>(static_cast<char const *>(data_.data()), data_.size()),
rec_starts_,
do_set_null_count,
num_columns,
get_column_map_device_ptr(),
stream);
auto get_type_id = [&](auto const &cinfo) {
if (cinfo.null_count == static_cast<int>(rec_starts_.size())) {
// Entire column is NULL; allocate the smallest amount of memory
return type_id::INT8;
} else if (cinfo.string_count > 0) {
return type_id::STRING;
} else if (cinfo.datetime_count > 0) {
return type_id::TIMESTAMP_MILLISECONDS;
} else if (cinfo.float_count > 0 || (cinfo.int_count > 0 && cinfo.null_count > 0)) {
return type_id::FLOAT64;
} else if (cinfo.int_count > 0) {
return type_id::INT64;
} else if (cinfo.bool_count > 0) {
return type_id::BOOL8;
} else {
CUDF_FAIL("Data type detection failed.\n");
}
};
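    // For illustration of the priority above: an all-null column maps to INT8 (smallest footprint),
    // any string content forces STRING, and an integer column that also contains nulls is widened to
    // FLOAT64.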
std::transform(std::cbegin(h_column_infos),
std::cend(h_column_infos),
std::back_inserter(dtypes_),
[&](auto const &cinfo) { return data_type{get_type_id(cinfo)}; });
}
}
/**
 * @brief Parse the input data and store the results in a table
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return table_with_metadata struct
*/
table_with_metadata reader::impl::convert_data_to_table(hipStream_t stream)
{
const auto num_columns = dtypes_.size();
const auto num_records = rec_starts_.size();
// alloc output buffers.
std::vector<column_buffer> out_buffers;
for (size_t col = 0; col < num_columns; ++col) {
out_buffers.emplace_back(dtypes_[col], num_records, true, stream, mr_);
}
thrust::host_vector<data_type> h_dtypes(num_columns);
thrust::host_vector<void *> h_data(num_columns);
thrust::host_vector<bitmask_type *> h_valid(num_columns);
for (size_t i = 0; i < num_columns; ++i) {
h_dtypes[i] = dtypes_[i];
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
rmm::device_vector<data_type> d_dtypes = h_dtypes;
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<cudf::bitmask_type *> d_valid = h_valid;
rmm::device_vector<cudf::size_type> d_valid_counts(num_columns, 0);
cudf::io::json::gpu::convert_json_to_columns(
opts_,
device_span<char const>(static_cast<char const *>(data_.data()), data_.size()),
rec_starts_,
d_dtypes,
get_column_map_device_ptr(),
d_data,
d_valid,
d_valid_counts,
stream);
CUDA_TRY(hipStreamSynchronize(stream));
CUDA_TRY(hipGetLastError());
// postprocess columns
auto target = make_strings_column(
std::vector<char>{'\\', '"', '\\', '\\', '\\', 't', '\\', 'r', '\\', 'b'}, {0, 2, 4, 6, 8, 10});
auto repl = make_strings_column({'"', '\\', '\t', '\r', '\b'}, {0, 1, 2, 3, 4, 5});
thrust::host_vector<cudf::size_type> h_valid_counts = d_valid_counts;
std::vector<std::unique_ptr<column>> out_columns;
for (size_t i = 0; i < num_columns; ++i) {
out_buffers[i].null_count() = num_records - h_valid_counts[i];
auto out_column = make_column(out_buffers[i], stream, mr_);
if (out_column->type().id() == type_id::STRING) {
// Need to remove escape character in case of '\"' and '\\'
out_columns.emplace_back(cudf::strings::detail::replace(
out_column->view(), target->view(), repl->view(), mr_, stream));
} else {
out_columns.emplace_back(std::move(out_column));
}
}
CUDF_EXPECTS(!out_columns.empty(), "No columns created from json input");
return table_with_metadata{std::make_unique<table>(std::move(out_columns)), metadata_};
}
reader::impl::impl(std::unique_ptr<datasource> source,
std::string filepath,
json_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: source_(std::move(source)), filepath_(filepath), options_(options), mr_(mr)
{
CUDF_EXPECTS(options_.is_enabled_lines(), "Only JSON Lines format is currently supported.\n");
d_trie_true_ = createSerializedTrie({"true"});
opts_.trueValuesTrie = d_trie_true_.data().get();
d_trie_false_ = createSerializedTrie({"false"});
opts_.falseValuesTrie = d_trie_false_.data().get();
d_trie_na_ = createSerializedTrie({"null"});
opts_.naValuesTrie = d_trie_na_.data().get();
opts_.dayfirst = options.is_enabled_dayfirst();
}
/**
* @brief Read an entire set or a subset of data from the source
*
* @param[in] range_offset Number of bytes offset from the start
* @param[in] range_size Bytes to read; use `0` for all remaining data
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return Table and its metadata
*/
table_with_metadata reader::impl::read(json_reader_options const &options, hipStream_t stream)
{
auto range_offset = options.get_byte_range_offset();
auto range_size = options.get_byte_range_size();
ingest_raw_input(range_offset, range_size);
CUDF_EXPECTS(buffer_ != nullptr, "Ingest failed: input data is null.\n");
decompress_input(stream);
CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n");
CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n");
set_record_starts(stream);
CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n");
upload_data_to_device(stream);
CUDF_EXPECTS(data_.size() != 0, "Error uploading input data to the GPU.\n");
set_column_names(stream);
CUDF_EXPECTS(!metadata_.column_names.empty(), "Error determining column names.\n");
set_data_types(stream);
CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n");
return convert_data_to_table(stream);
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
json_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
// Delay actual instantiation of data source until read to allow for
// partial memory mapping of file using byte ranges
_impl = std::make_unique<impl>(nullptr, filepaths[0], options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
json_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), "", options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(json_reader_options const &options, hipStream_t stream)
{
return table_with_metadata{_impl->read(options, stream)};
}
} // namespace json
} // namespace detail
} // namespace io
} // namespace cudf
| ce34b41857548ba43de188dd717b0af6122ef295.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO JSON reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/utilities/trie.cuh>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_scalar.hpp>
#include <thrust/optional.h>
using cudf::detail::host_span;
namespace cudf {
namespace io {
namespace detail {
namespace json {
using namespace cudf::io;
namespace {
/**
* @brief Estimates the maximum expected length or a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the JSON file (optional)
*
* @return Estimated maximum size of a row, in bytes
*/
constexpr size_t calculate_max_row_size(int num_columns = 0) noexcept
{
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
return num_columns == 0
? max_row_bytes // Use flat size if the # of columns is not known
: base_padding +
num_columns * column_bytes; // Expand size based on the # of columns, if available
}
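// For illustration: with 8 columns the estimate is 1024 + 8 * 64 = 1536 bytes per row, while an
// unknown column count falls back to the flat 16 KB upper bound.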
} // anonymous namespace
/**
* @brief Aggregate the table containing keys info by their hash values.
*
* @param[in] info Table with columns containing key offsets, lengths and hashes, respectively
*
* @return Table with data aggregated by key hash values
*/
std::unique_ptr<table> aggregate_keys_info(std::unique_ptr<table> info)
{
auto const info_view = info->view();
std::vector<groupby::aggregation_request> requests;
requests.emplace_back(groupby::aggregation_request{info_view.column(0)});
requests.back().aggregations.emplace_back(make_min_aggregation());
requests.back().aggregations.emplace_back(make_nth_element_aggregation(0));
requests.emplace_back(groupby::aggregation_request{info_view.column(1)});
requests.back().aggregations.emplace_back(make_min_aggregation());
requests.back().aggregations.emplace_back(make_nth_element_aggregation(0));
// Aggregate by hash values
groupby::groupby gb_obj(
table_view({info_view.column(2)}), null_policy::EXCLUDE, sorted::NO, {}, {});
auto result = gb_obj.aggregate(requests); // TODO: no stream parameter?
std::vector<std::unique_ptr<column>> out_columns;
out_columns.emplace_back(std::move(result.second[0].results[0])); // offsets
out_columns.emplace_back(std::move(result.second[1].results[0])); // lengths
out_columns.emplace_back(std::move(result.first->release()[0])); // hashes
return std::make_unique<table>(std::move(out_columns));
}
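// For illustration (hypothetical key name): if a key "id" appears in every row, all of its
// (offset, length, hash) entries share one hash and collapse into a single output row, keeping the
// smallest offset and length through the min aggregations above.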
/**
* @brief Initializes the (key hash -> column index) hash map.
*/
col_map_ptr_type create_col_names_hash_map(column_view column_name_hashes, cudaStream_t stream)
{
auto key_col_map{col_map_type::create(column_name_hashes.size())};
auto const column_data = column_name_hashes.data<uint32_t>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
column_name_hashes.size(),
[map = *key_col_map, column_data] __device__(size_type idx) mutable {
map.insert(thrust::make_pair(column_data[idx], idx));
});
return key_col_map;
}
/**
* @brief Create a table whose columns contain the information on JSON objects' keys.
*
* The columns contain name offsets in the file, name lengths and name hashes, respectively.
*
* @param[in] options Parsing options (e.g. delimiter and quotation character)
* @param[in] data Input JSON device data
* @param[in] row_offsets Device array of row start locations in the input buffer
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*
* @return std::unique_ptr<table> cudf table with three columns (offsets, lenghts, hashes)
*/
std::unique_ptr<table> create_json_keys_info_table(const ParseOptions &options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
cudaStream_t stream)
{
// Count keys
rmm::device_scalar<unsigned long long int> key_counter(0, stream);
cudf::io::json::gpu::collect_keys_info(
options, data, row_offsets, key_counter.data(), {}, stream);
// Allocate columns to store hash value, length, and offset of each JSON object key in the input
auto const num_keys = key_counter.value();
std::vector<std::unique_ptr<column>> info_columns;
info_columns.emplace_back(make_numeric_column(data_type(type_id::UINT64), num_keys));
info_columns.emplace_back(make_numeric_column(data_type(type_id::UINT16), num_keys));
info_columns.emplace_back(make_numeric_column(data_type(type_id::UINT32), num_keys));
// Create a table out of these columns to pass them around more easily
auto info_table = std::make_unique<table>(std::move(info_columns));
auto const info_table_mdv = mutable_table_device_view::create(info_table->mutable_view(), stream);
// Reset the key counter - now used for indexing
key_counter.set_value(0, stream);
// Fill the allocated columns
cudf::io::json::gpu::collect_keys_info(
options, data, row_offsets, key_counter.data(), {*info_table_mdv}, stream);
return info_table;
}
/**
 * @brief Extract the keys from the JSON file using the name offsets/lengths.
*/
std::vector<std::string> create_key_strings(char const *h_data,
table_view sorted_info,
cudaStream_t stream)
{
auto const num_cols = sorted_info.num_rows();
std::vector<uint64_t> h_offsets(num_cols);
cudaMemcpyAsync(h_offsets.data(),
sorted_info.column(0).data<uint64_t>(),
sizeof(uint64_t) * num_cols,
cudaMemcpyDefault,
stream);
std::vector<uint16_t> h_lens(num_cols);
cudaMemcpyAsync(h_lens.data(),
sorted_info.column(1).data<uint16_t>(),
sizeof(uint16_t) * num_cols,
cudaMemcpyDefault,
stream);
std::vector<std::string> names(num_cols);
std::transform(h_offsets.cbegin(),
h_offsets.cend(),
h_lens.cbegin(),
names.begin(),
[&](auto offset, auto len) { return std::string(h_data + offset, len); });
return names;
}
auto sort_keys_info_by_offset(std::unique_ptr<table> info)
{
auto const agg_offset_col_view = info->get_column(0).view();
return sort_by_key(info->view(), table_view({agg_offset_col_view}));
}
/**
* @brief Extract JSON object keys from a JSON file.
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return Names of JSON object keys in the file
*/
std::pair<std::vector<std::string>, col_map_ptr_type> reader::impl::get_json_object_keys_hashes(
cudaStream_t stream)
{
auto info = create_json_keys_info_table(
opts_,
device_span<char const>(static_cast<char const *>(data_.data()), data_.size()),
rec_starts_,
stream);
auto aggregated_info = aggregate_keys_info(std::move(info));
auto sorted_info = sort_keys_info_by_offset(std::move(aggregated_info));
return {create_key_strings(uncomp_data_, sorted_info->view(), stream),
create_col_names_hash_map(sorted_info->get_column(2).view(), stream)};
}
/**
* @brief Ingest input JSON file/buffer, without decompression.
*
* Sets the source_, byte_range_offset_, and byte_range_size_ data members
*
* @param[in] range_offset Number of bytes offset from the start
* @param[in] range_size Bytes to read; use `0` for all remaining data
*/
void reader::impl::ingest_raw_input(size_t range_offset, size_t range_size)
{
size_t map_range_size = 0;
if (range_size != 0) {
map_range_size = range_size + calculate_max_row_size(options_.get_dtypes().size());
}
// Support delayed opening of the file if using memory mapping datasource
// This allows only mapping of a subset of the file if using byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
if (!source_->is_empty()) {
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
buffer_ = source_->host_read(range_offset, data_size);
}
byte_range_offset_ = range_offset;
byte_range_size_ = range_size;
load_whole_file_ = byte_range_offset_ == 0 && byte_range_size_ == 0;
}
/**
* @brief Decompress the input data, if needed
*
* Sets the uncomp_data_ and uncomp_size_ data members
* Loads the data into device memory if byte range parameters are not used
*/
void reader::impl::decompress_input(cudaStream_t stream)
{
const auto compression_type =
infer_compression_type(options_.get_compression(),
filepath_,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
if (compression_type == "none") {
// Do not use the owner vector here to avoid extra copy
uncomp_data_ = reinterpret_cast<const char *>(buffer_->data());
uncomp_size_ = buffer_->size();
} else {
uncomp_data_owner_ = get_uncompressed_data( //
host_span<char const>( //
reinterpret_cast<const char *>(buffer_->data()),
buffer_->size()),
compression_type);
uncomp_data_ = uncomp_data_owner_.data();
uncomp_size_ = uncomp_data_owner_.size();
}
if (load_whole_file_) data_ = rmm::device_buffer(uncomp_data_, uncomp_size_, stream);
}
/**
* @brief Finds all record starts in the file and stores them in rec_starts_
*
* Does not upload the entire file to the GPU
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
void reader::impl::set_record_starts(cudaStream_t stream)
{
std::vector<char> chars_to_count{'\n'};
  // Currently, ignoring line terminations within quotes is handled by recording the positions of
  // both newlines and quote characters, and then filtering out the record starts that follow a
  // quotechar or a line termination inside a quotechar pair.
if (allow_newlines_in_strings_) { chars_to_count.push_back('\"'); }
// If not starting at an offset, add an extra row to account for the first row in the file
cudf::size_type prefilter_count = ((byte_range_offset_ == 0) ? 1 : 0);
if (load_whole_file_) {
prefilter_count += count_all_from_set(data_, chars_to_count);
} else {
prefilter_count += count_all_from_set(uncomp_data_, uncomp_size_, chars_to_count);
}
rec_starts_.resize(prefilter_count);
auto *find_result_ptr = rec_starts_.data().get();
// Manually adding an extra row to account for the first row in the file
if (byte_range_offset_ == 0) {
find_result_ptr++;
CUDA_TRY(cudaMemsetAsync(rec_starts_.data().get(), 0ull, sizeof(uint64_t), stream));
}
std::vector<char> chars_to_find{'\n'};
if (allow_newlines_in_strings_) { chars_to_find.push_back('\"'); }
// Passing offset = 1 to return positions AFTER the found character
if (load_whole_file_) {
find_all_from_set(data_, chars_to_find, 1, find_result_ptr);
} else {
find_all_from_set(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr);
}
  // Previous call stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(stream), rec_starts_.begin(), rec_starts_.end());
auto filtered_count = prefilter_count;
if (allow_newlines_in_strings_) {
thrust::host_vector<uint64_t> h_rec_starts = rec_starts_;
bool quotation = false;
for (cudf::size_type i = 1; i < prefilter_count; ++i) {
if (uncomp_data_[h_rec_starts[i] - 1] == '\"') {
quotation = !quotation;
h_rec_starts[i] = uncomp_size_;
filtered_count--;
} else if (quotation) {
h_rec_starts[i] = uncomp_size_;
filtered_count--;
}
}
rec_starts_ = h_rec_starts;
thrust::sort(rmm::exec_policy()->on(stream), rec_starts_.begin(), rec_starts_.end());
}
// Exclude the ending newline as it does not precede a record start
if (uncomp_data_[uncomp_size_ - 1] == '\n') { filtered_count--; }
rec_starts_.resize(filtered_count);
}
/**
* @brief Uploads the relevant segment of the input json data onto the GPU.
*
* Sets the d_data_ data member.
* Only rows that need to be parsed are copied, based on the byte range
* Also updates the array of record starts to match the device data offset.
*
*/
void reader::impl::upload_data_to_device(cudaStream_t stream)
{
size_t start_offset = 0;
size_t end_offset = uncomp_size_;
// Trim lines that are outside range
if (byte_range_size_ != 0 || byte_range_offset_ != 0) {
thrust::host_vector<uint64_t> h_rec_starts = rec_starts_;
if (byte_range_size_ != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() && *it > byte_range_size_) {
end_offset = *it;
--it;
}
h_rec_starts.erase(it + 1, h_rec_starts.end());
}
// Resize to exclude rows outside of the range
// Adjust row start positions to account for the data subcopy
start_offset = h_rec_starts.front();
rec_starts_.resize(h_rec_starts.size());
thrust::transform(rmm::exec_policy()->on(stream),
rec_starts_.begin(),
rec_starts_.end(),
thrust::make_constant_iterator(start_offset),
rec_starts_.begin(),
thrust::minus<uint64_t>());
}
const size_t bytes_to_upload = end_offset - start_offset;
CUDF_EXPECTS(bytes_to_upload <= uncomp_size_,
"Error finding the record within the specified byte range.\n");
// Upload the raw data that is within the rows of interest
data_ = rmm::device_buffer(uncomp_data_ + start_offset, bytes_to_upload, stream);
}
/**
* @brief Parse the first row to set the column name
*
* Sets the column_names_ data member
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
void reader::impl::set_column_names(cudaStream_t stream)
{
// If file only contains one row, use the file size for the row size
uint64_t first_row_len = data_.size() / sizeof(char);
if (rec_starts_.size() > 1) {
// Set first_row_len to the offset of the second row, if it exists
CUDA_TRY(cudaMemcpyAsync(&first_row_len,
rec_starts_.data().get() + 1,
sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream));
}
std::vector<char> first_row(first_row_len);
CUDA_TRY(cudaMemcpyAsync(
first_row.data(), data_.data(), first_row_len * sizeof(char), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// Determine the row format between:
// JSON array - [val1, val2, ...] and
// JSON object - {"col1":val1, "col2":val2, ...}
// based on the top level opening bracket
const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '[');
const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{');
CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(),
"Input data is not a valid JSON file.");
// If the first opening bracket is '{', assume object format
if (first_curly_bracket < first_square_bracket) {
// use keys as column names if input rows are objects
auto keys_desc = get_json_object_keys_hashes(stream);
metadata_.column_names = keys_desc.first;
set_column_map(std::move(keys_desc.second));
} else {
int cols_found = 0;
bool quotation = false;
for (size_t pos = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts_.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) {
metadata_.column_names.emplace_back(std::to_string(cols_found++));
}
}
}
}
/**
* @brief Set the data type array data member
*
* If user does not pass the data types, deduces types from the file content
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
void reader::impl::set_data_types(cudaStream_t stream)
{
auto const dtype = options_.get_dtypes();
if (!dtype.empty()) {
CUDF_EXPECTS(dtype.size() == metadata_.column_names.size(),
"Need to specify the type of each column.\n");
// Assume that the dtype is in dictionary format only if all elements contain a colon
const bool is_dict =
std::all_of(std::cbegin(dtype), std::cend(dtype), [](const std::string &s) {
return std::find(std::cbegin(s), std::cend(s), ':') != std::cend(s);
});
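    // For illustration (hypothetical column names): {"age:int64", "price:float64"} is treated as
    // the dictionary form, while {"int64", "float64"} (no colons) is read as a positional type list.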
// When C++17, use std::string_view and CTAD
auto split_on_colon = [](auto const &s) -> std::pair<std::string, std::string> {
auto const i = s.find(":");
auto const a = s.substr(0, i);
auto const b = s.substr(i + 1);
return {a, b};
};
if (is_dict) {
std::map<std::string, data_type> col_type_map;
std::transform(std::cbegin(dtype),
std::cend(dtype),
std::inserter(col_type_map, col_type_map.end()),
[&](auto const &ts) -> std::pair<std::string, data_type> {
// When C++17, use structured bindings: auto const& [col_name, type_str] = ..
auto split = split_on_colon(ts);
return {split.first, convert_string_to_dtype(split.second)};
});
// Using the map here allows O(n log n) complexity
std::transform(std::cbegin(metadata_.column_names),
std::cend(metadata_.column_names),
std::back_inserter(dtypes_),
[&](auto const &column_name) { return col_type_map[column_name]; });
} else {
std::transform(std::cbegin(dtype),
std::cend(dtype),
std::back_inserter(dtypes_),
[](auto const &col_dtype) { return convert_string_to_dtype(col_dtype); });
}
} else {
CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n");
auto const num_columns = metadata_.column_names.size();
auto const do_set_null_count = key_to_col_idx_map_ != nullptr;
auto const h_column_infos = cudf::io::json::gpu::detect_data_types(
opts_,
device_span<char const>(static_cast<char const *>(data_.data()), data_.size()),
rec_starts_,
do_set_null_count,
num_columns,
get_column_map_device_ptr(),
stream);
auto get_type_id = [&](auto const &cinfo) {
if (cinfo.null_count == static_cast<int>(rec_starts_.size())) {
// Entire column is NULL; allocate the smallest amount of memory
return type_id::INT8;
} else if (cinfo.string_count > 0) {
return type_id::STRING;
} else if (cinfo.datetime_count > 0) {
return type_id::TIMESTAMP_MILLISECONDS;
} else if (cinfo.float_count > 0 || (cinfo.int_count > 0 && cinfo.null_count > 0)) {
return type_id::FLOAT64;
} else if (cinfo.int_count > 0) {
return type_id::INT64;
} else if (cinfo.bool_count > 0) {
return type_id::BOOL8;
} else {
CUDF_FAIL("Data type detection failed.\n");
}
};
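    // For illustration of the priority above: an all-null column maps to INT8 (smallest footprint),
    // any string content forces STRING, and an integer column that also contains nulls is widened to
    // FLOAT64.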
std::transform(std::cbegin(h_column_infos),
std::cend(h_column_infos),
std::back_inserter(dtypes_),
[&](auto const &cinfo) { return data_type{get_type_id(cinfo)}; });
}
}
/**
 * @brief Parse the input data and store the results in a table
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return table_with_metadata struct
*/
table_with_metadata reader::impl::convert_data_to_table(cudaStream_t stream)
{
const auto num_columns = dtypes_.size();
const auto num_records = rec_starts_.size();
// alloc output buffers.
std::vector<column_buffer> out_buffers;
for (size_t col = 0; col < num_columns; ++col) {
out_buffers.emplace_back(dtypes_[col], num_records, true, stream, mr_);
}
thrust::host_vector<data_type> h_dtypes(num_columns);
thrust::host_vector<void *> h_data(num_columns);
thrust::host_vector<bitmask_type *> h_valid(num_columns);
for (size_t i = 0; i < num_columns; ++i) {
h_dtypes[i] = dtypes_[i];
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
rmm::device_vector<data_type> d_dtypes = h_dtypes;
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<cudf::bitmask_type *> d_valid = h_valid;
rmm::device_vector<cudf::size_type> d_valid_counts(num_columns, 0);
cudf::io::json::gpu::convert_json_to_columns(
opts_,
device_span<char const>(static_cast<char const *>(data_.data()), data_.size()),
rec_starts_,
d_dtypes,
get_column_map_device_ptr(),
d_data,
d_valid,
d_valid_counts,
stream);
CUDA_TRY(cudaStreamSynchronize(stream));
CUDA_TRY(cudaGetLastError());
// postprocess columns
auto target = make_strings_column(
std::vector<char>{'\\', '"', '\\', '\\', '\\', 't', '\\', 'r', '\\', 'b'}, {0, 2, 4, 6, 8, 10});
auto repl = make_strings_column({'"', '\\', '\t', '\r', '\b'}, {0, 1, 2, 3, 4, 5});
thrust::host_vector<cudf::size_type> h_valid_counts = d_valid_counts;
std::vector<std::unique_ptr<column>> out_columns;
for (size_t i = 0; i < num_columns; ++i) {
out_buffers[i].null_count() = num_records - h_valid_counts[i];
auto out_column = make_column(out_buffers[i], stream, mr_);
if (out_column->type().id() == type_id::STRING) {
// Need to remove escape character in case of '\"' and '\\'
out_columns.emplace_back(cudf::strings::detail::replace(
out_column->view(), target->view(), repl->view(), mr_, stream));
} else {
out_columns.emplace_back(std::move(out_column));
}
}
CUDF_EXPECTS(!out_columns.empty(), "No columns created from json input");
return table_with_metadata{std::make_unique<table>(std::move(out_columns)), metadata_};
}
reader::impl::impl(std::unique_ptr<datasource> source,
std::string filepath,
json_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: source_(std::move(source)), filepath_(filepath), options_(options), mr_(mr)
{
CUDF_EXPECTS(options_.is_enabled_lines(), "Only JSON Lines format is currently supported.\n");
d_trie_true_ = createSerializedTrie({"true"});
opts_.trueValuesTrie = d_trie_true_.data().get();
d_trie_false_ = createSerializedTrie({"false"});
opts_.falseValuesTrie = d_trie_false_.data().get();
d_trie_na_ = createSerializedTrie({"null"});
opts_.naValuesTrie = d_trie_na_.data().get();
opts_.dayfirst = options.is_enabled_dayfirst();
}
/**
* @brief Read an entire set or a subset of data from the source
*
* @param[in] range_offset Number of bytes offset from the start
* @param[in] range_size Bytes to read; use `0` for all remaining data
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return Table and its metadata
*/
table_with_metadata reader::impl::read(json_reader_options const &options, cudaStream_t stream)
{
auto range_offset = options.get_byte_range_offset();
auto range_size = options.get_byte_range_size();
ingest_raw_input(range_offset, range_size);
CUDF_EXPECTS(buffer_ != nullptr, "Ingest failed: input data is null.\n");
decompress_input(stream);
CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n");
CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n");
set_record_starts(stream);
CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n");
upload_data_to_device(stream);
CUDF_EXPECTS(data_.size() != 0, "Error uploading input data to the GPU.\n");
set_column_names(stream);
CUDF_EXPECTS(!metadata_.column_names.empty(), "Error determining column names.\n");
set_data_types(stream);
CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n");
return convert_data_to_table(stream);
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
json_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
// Delay actual instantiation of data source until read to allow for
// partial memory mapping of file using byte ranges
_impl = std::make_unique<impl>(nullptr, filepaths[0], options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
json_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), "", options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(json_reader_options const &options, cudaStream_t stream)
{
return table_with_metadata{_impl->read(options, stream)};
}
} // namespace json
} // namespace detail
} // namespace io
} // namespace cudf
|
977a40624261616f24805934a133d3ff18de1e9b.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
// includes, kernels
#include "backprop_cuda_kernel.cu"
#include "backprop.h"
////////////////////////////////////////////////////////////////////////////////
extern "C"
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2);
extern "C"
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err);
extern "C"
void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err);
extern "C"
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw);
extern "C"
int setup(int argc, char** argv);
extern "C"
float **alloc_2d_dbl(int m, int n);
extern "C"
float squash(float x);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
unsigned int num_threads = 0;
unsigned int num_blocks = 0;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
setup(argc, argv);
}
extern "C"
void bpnn_train_cuda(BPNN *net, float *eo, float *eh)
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
#ifdef GPU
int m = 0;
float *input_hidden_cuda;
float *input_cuda;
float *output_hidden_cuda;
float *partial_sum;
float *hidden_partial_sum;
float *hidden_delta_cuda;
float *input_prev_weights_cuda;
float sum;
float *input_weights_one_dim;
float *input_weights_prev_one_dim;
num_blocks = in / 16;
dim3 grid( 1 , num_blocks);
dim3 threads(16 , 16);
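  // Note: num_blocks uses integer division, so this launch configuration assumes the input layer
  // size is a multiple of 16; each 16x16 block presumably covers a tile of (input unit, hidden unit)
  // pairs in the layer-forward kernel.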
input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float));
  // this preprocessing stage is added to correct the bug of an incorrect memcpy when using the two-dimensional net->input_weights directly
for (int k = 0; k <= in; k++) {
for (int j = 0; j <= hid; j++) {
input_weights_one_dim[m] = net->input_weights[k][j];
input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j];
m++;
}
}
hipMalloc((void**) &input_cuda, (in + 1) * sizeof(float));
hipMalloc((void**) &output_hidden_cuda, (hid + 1) * sizeof(float));
hipMalloc((void**) &input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float));
hipMalloc((void**) &hidden_partial_sum, num_blocks * WIDTH * sizeof(float));
#endif
#ifdef CPU
printf("Performing CPU computation\n");
bpnn_layerforward(net->input_units, net->hidden_units,net->input_weights, in, hid);
#endif
#ifdef GPU
printf("Performing GPU computation\n");
//printf("in= %d, hid = %d, numblocks = %d\n", in, hid, num_blocks);
hipMemcpy(input_cuda, net->input_units, (in + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( bpnn_layerforward_CUDA), dim3(grid), dim3(threads) , 0, 0, input_cuda,
output_hidden_cuda,
input_hidden_cuda,
hidden_partial_sum,
in,
hid);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("bpnn kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), hipMemcpyDeviceToHost);
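  // Each block produced one partial dot product per hidden unit; finish the reduction on the host,
  // add the row-0 weight (serving as the bias term), and apply the logistic activation
  // 1 / (1 + exp(-sum)).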
for (int j = 1; j <= hid; j++) {
sum = 0.0;
for (int k = 0; k < num_blocks; k++) {
sum += partial_sum[k * hid + j-1] ;
}
sum += net->input_weights[0][j];
net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum)));
}
#endif
bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out);
bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err);
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights);
#ifdef CPU
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights);
#endif
#ifdef GPU
hipMalloc((void**) &hidden_delta_cuda, (hid + 1) * sizeof(float));
hipMalloc((void**) &input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float));
hipMemcpy(hidden_delta_cuda, net->hidden_delta, (hid + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(input_prev_weights_cuda, input_weights_prev_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( bpnn_adjust_weights_cuda), dim3(grid), dim3(threads) , 0, 0, hidden_delta_cuda,
hid,
input_cuda,
in,
input_hidden_cuda,
input_prev_weights_cuda
);
hipMemcpy(net->input_units, input_cuda, (in + 1) * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(input_weights_one_dim, input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyDeviceToHost);
hipFree(input_cuda);
hipFree(output_hidden_cuda);
hipFree(input_hidden_cuda);
hipFree(hidden_partial_sum);
hipFree(input_prev_weights_cuda);
hipFree(hidden_delta_cuda);
free(partial_sum);
free(input_weights_one_dim);
free(input_weights_prev_one_dim);
#endif
}
| 977a40624261616f24805934a133d3ff18de1e9b.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
// includes, kernels
#include "backprop_cuda_kernel.cu"
#include "backprop.h"
////////////////////////////////////////////////////////////////////////////////
extern "C"
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2);
extern "C"
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err);
extern "C"
void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err);
extern "C"
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw);
extern "C"
int setup(int argc, char** argv);
extern "C"
float **alloc_2d_dbl(int m, int n);
extern "C"
float squash(float x);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
unsigned int num_threads = 0;
unsigned int num_blocks = 0;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
setup(argc, argv);
}
extern "C"
void bpnn_train_cuda(BPNN *net, float *eo, float *eh)
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
#ifdef GPU
int m = 0;
float *input_hidden_cuda;
float *input_cuda;
float *output_hidden_cuda;
float *partial_sum;
float *hidden_partial_sum;
float *hidden_delta_cuda;
float *input_prev_weights_cuda;
float sum;
float *input_weights_one_dim;
float *input_weights_prev_one_dim;
num_blocks = in / 16;
dim3 grid( 1 , num_blocks);
dim3 threads(16 , 16);
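  // Note: num_blocks uses integer division, so this launch configuration assumes the input layer
  // size is a multiple of 16; each 16x16 block presumably covers a tile of (input unit, hidden unit)
  // pairs in the layer-forward kernel.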
input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float));
  // this preprocessing stage is added to correct the bug of an incorrect memcpy when using the two-dimensional net->input_weights directly
for (int k = 0; k <= in; k++) {
for (int j = 0; j <= hid; j++) {
input_weights_one_dim[m] = net->input_weights[k][j];
input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j];
m++;
}
}
cudaMalloc((void**) &input_cuda, (in + 1) * sizeof(float));
cudaMalloc((void**) &output_hidden_cuda, (hid + 1) * sizeof(float));
cudaMalloc((void**) &input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float));
cudaMalloc((void**) &hidden_partial_sum, num_blocks * WIDTH * sizeof(float));
#endif
#ifdef CPU
printf("Performing CPU computation\n");
bpnn_layerforward(net->input_units, net->hidden_units,net->input_weights, in, hid);
#endif
#ifdef GPU
printf("Performing GPU computation\n");
//printf("in= %d, hid = %d, numblocks = %d\n", in, hid, num_blocks);
cudaMemcpy(input_cuda, net->input_units, (in + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice);
bpnn_layerforward_CUDA<<< grid, threads >>>(input_cuda,
output_hidden_cuda,
input_hidden_cuda,
hidden_partial_sum,
in,
hid);
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("bpnn kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), cudaMemcpyDeviceToHost);
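  // Each block produced one partial dot product per hidden unit; finish the reduction on the host,
  // add the row-0 weight (serving as the bias term), and apply the logistic activation
  // 1 / (1 + exp(-sum)).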
for (int j = 1; j <= hid; j++) {
sum = 0.0;
for (int k = 0; k < num_blocks; k++) {
sum += partial_sum[k * hid + j-1] ;
}
sum += net->input_weights[0][j];
net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum)));
}
#endif
bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out);
bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err);
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights);
#ifdef CPU
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights);
#endif
#ifdef GPU
cudaMalloc((void**) &hidden_delta_cuda, (hid + 1) * sizeof(float));
cudaMalloc((void**) &input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float));
cudaMemcpy(hidden_delta_cuda, net->hidden_delta, (hid + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(input_prev_weights_cuda, input_weights_prev_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice);
bpnn_adjust_weights_cuda<<< grid, threads >>>(hidden_delta_cuda,
hid,
input_cuda,
in,
input_hidden_cuda,
input_prev_weights_cuda
);
cudaMemcpy(net->input_units, input_cuda, (in + 1) * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(input_weights_one_dim, input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(input_cuda);
cudaFree(output_hidden_cuda);
cudaFree(input_hidden_cuda);
cudaFree(hidden_partial_sum);
cudaFree(input_prev_weights_cuda);
cudaFree(hidden_delta_cuda);
free(partial_sum);
free(input_weights_one_dim);
free(input_weights_prev_one_dim);
#endif
}
|
6c0d4017368f16dbf1dd53c7f55895eba03f2d75.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************/
// The purpose of this file is to provide a GPU implementation of the
// heat transfer simulation using MATLAB.
//
// Author: Jason Lowden
// Date: October 20, 2013
//
// File: KMeans.h
/************************************************************************/
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <hip/hip_texture_types.h>
#include <iostream>
#include "HeatTransfer.h"
texture<float,2> texIn; // Input to texture memory
__global__ void UpdateHeatMapKernel(float * texOut,int size, float heatSpeed)
{
int col = threadIdx.x + blockIdx.x * blockDim.x; // Calculates the current column
int row = threadIdx.y + blockIdx.y * blockDim.y; // Calculate the current row
	int offset = col + row * size; // index of the current element being updated
if(col > 0 && col < size-1 && row < size-1 && row > 0){
float top = tex2D(texIn, col, row-1); // element on top of current element
float left = tex2D(texIn, col-1, row); // element on left of current element
float right = tex2D(texIn, col+1, row); // element on right of current element
float bottom = tex2D(texIn, col, row+1);// element on bottom of current element
float current = tex2D(texIn, col, row); // Current element
		float temp = heatSpeed * ( top + bottom + right + left - (4 * current)); // heat transferred from the neighboring elements
texOut[offset] = current + temp; // New heat
}
}
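// The kernel above performs one explicit finite-difference step of the heat map update:
// u_new = u + heatSpeed * (u_top + u_bottom + u_left + u_right - 4 * u),
// i.e. the discrete Laplacian scaled by heatSpeed is added to each interior cell; border cells are
// left unchanged.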
// Calculates the updated heat map for a given size based on number of iterations
bool UpdateHeatMap(float* dataIn, float* dataOut, int size, float heatSpeed, int numIterations)
{
hipError_t status; // to check success of cuda calls
int bytes = size * size * sizeof(float); // size of input data
hipArray_t dataIn_d; // Input data array
float* texOut; //Output from texture memory
//Allocation of device data
hipMalloc((void**)&texOut, bytes);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Alloc failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
//Copying data to device memory
hipMemcpy(texOut, dataIn, bytes, hipMemcpyHostToDevice);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Memcopy failed failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Desc failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
unsigned int flags=0;
// Allocate array in device
hipMallocArray(&dataIn_d, &desc,size, size);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Array alloc failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
int size1= size * size *sizeof(float);
//Copy data into array
hipMemcpyToArray (dataIn_d, 0, 0, dataIn, size1, hipMemcpyHostToDevice);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "memcpy to array failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
//Bind array to texture
hipBindTextureToArray (&texIn, dataIn_d, &desc);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Cuda Binding failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
dim3 dimBlock(16,16); // Dimension of block
dim3 dimGrid((int)ceil((float)size / (float)16), (int)ceil((float)size / (float)16)); // Dynamic allocation for dimension of grid
for(int i = 0; i < numIterations; i++)
{
hipLaunchKernelGGL(( UpdateHeatMapKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, texOut, size, heatSpeed); // Calls heat map Kernel
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Cuda kernal failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
hipDeviceSynchronize(); // Cuda Synchronisation
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Sync failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
hipUnbindTexture (&texIn); // Unbind texture memory
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Unbind failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
hipMemcpyToArray (dataIn_d, 0, 0, texOut, size1, hipMemcpyDeviceToDevice); // Cuda memcpy to array within device
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "memcpy to array failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
hipBindTextureToArray (&texIn, dataIn_d, &desc); // Bind array to texture memory
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Bind failed: " << hipGetErrorString(status) << std::endl;
hipFree(texOut);
return false;
}
}
hipMemcpy(dataOut, texOut, bytes, hipMemcpyDeviceToHost); // Copy results to host
hipUnbindTexture (&texIn); // Unbind texture memory
hipFree(texOut); // Free cuda memory
hipFreeArray(dataIn_d); // Free cuda memory
return true;
} | 6c0d4017368f16dbf1dd53c7f55895eba03f2d75.cu | /************************************************************************/
// The purpose of this file is to provide a GPU implementation of the
// heat transfer simulation originally written in MATLAB.
//
// Author: Jason Lowden
// Date: October 20, 2013
//
// File: HeatTransfer.cu
/************************************************************************/
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <cuda_texture_types.h>
#include <iostream>
#include "HeatTransfer.h"
texture<float,2> texIn; // Input to texture memory
__global__ void UpdateHeatMapKernel(float * texOut,int size, float heatSpeed)
{
int col = threadIdx.x + blockIdx.x * blockDim.x; // Calculates the current column
int row = threadIdx.y + blockIdx.y * blockDim.y; // Calculate the current row
int offset = col + row * size; // indicates the current element being operated on
if(col > 0 && col < size-1 && row < size-1 && row > 0){
float top = tex2D(texIn, col, row-1); // element on top of current element
float left = tex2D(texIn, col-1, row); // element on left of current element
float right = tex2D(texIn, col+1, row); // element on right of current element
float bottom = tex2D(texIn, col, row+1);// element on bottom of current element
float current = tex2D(texIn, col, row); // Current element
float temp = heatSpeed * ( top + bottom + right + left - (4 * current)); // heat transferred from the neighboring elements
texOut[offset] = current + temp; // New heat
}
}
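// A minimal host-side sketch of how UpdateHeatMap might be driven, assuming the
// declaration in HeatTransfer.h; the helper name, grid size, heat speed and hot-spot
// initialization below are illustrative only.
static void ExampleHeatTransferUsage()
{
	const int size = 256;                      // simulate a size x size grid
	const float heatSpeed = 0.25f;             // the explicit stencil is typically stable for heatSpeed <= 0.25
	const int numIterations = 100;
	float* in = new float[size * size]();      // zero-initialized temperature field
	float* out = new float[size * size];
	in[(size / 2) * size + size / 2] = 100.0f; // single hot cell in the center
	if (UpdateHeatMap(in, out, size, heatSpeed, numIterations))
		std::cout << "Center temperature after " << numIterations << " iterations: "
		          << out[(size / 2) * size + size / 2] << std::endl;
	delete[] in;
	delete[] out;
}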
// Calculates the updated heat map for a given size based on number of iterations
bool UpdateHeatMap(float* dataIn, float* dataOut, int size, float heatSpeed, int numIterations)
{
cudaError_t status; // to check success of cuda calls
int bytes = size * size * sizeof(float); // size of input data
cudaArray_t dataIn_d; // Input data array
float* texOut; //Output from texture memory
//Allocation of device data
cudaMalloc((void**)&texOut, bytes);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Alloc failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
//Copying data to device memory
cudaMemcpy(texOut, dataIn, bytes, cudaMemcpyHostToDevice);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Memcopy failed failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Desc failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
unsigned int flags=0;
// Allocate array in device
cudaMallocArray(&dataIn_d, &desc,size, size);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Array alloc failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
int size1= size * size *sizeof(float);
//Copy data into array
cudaMemcpyToArray (dataIn_d, 0, 0, dataIn, size1, cudaMemcpyHostToDevice);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "memcpy to array failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
//Bind array to texture
cudaBindTextureToArray (&texIn, dataIn_d, &desc);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Cuda Binding failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
dim3 dimBlock(16,16); // Dimension of block
dim3 dimGrid((int)ceil((float)size / (float)16), (int)ceil((float)size / (float)16)); // Dynamic allocation for dimension of grid
for(int i = 0; i < numIterations; i++)
{
UpdateHeatMapKernel<<<dimGrid, dimBlock>>>(texOut, size, heatSpeed); // Calls heat map Kernel
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Cuda kernal failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
cudaThreadSynchronize(); // Cuda Synchronisation
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Sync failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
cudaUnbindTexture (&texIn); // Unbind texture memory
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Unbind failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
cudaMemcpyToArray (dataIn_d, 0, 0, texOut, size1, cudaMemcpyDeviceToDevice); // Cuda memcpy to array within device
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "memcpy to array failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
cudaBindTextureToArray (&texIn, dataIn_d, &desc); // Bind array to texture memory
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Bind failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(texOut);
return false;
}
}
cudaMemcpy(dataOut, texOut, bytes, cudaMemcpyDeviceToHost); // Copy results to host
cudaUnbindTexture (&texIn); // Unbind texture memory
cudaFree(texOut); // Free cuda memory
cudaFreeArray(dataIn_d); // Free cuda memory
return true;
} |
c04967dfb5b249f9a0761dd7edbb246a16ea6e6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
#define delta 10
#define rows 50
#define columns 50
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //
{
for (int j = y; j > 0; j--) //
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
// , -
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
} | c04967dfb5b249f9a0761dd7edbb246a16ea6e6c.cu | #include "includes.h"
using namespace std;
#define delta 10
#define rows 50
#define columns 50
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Compute the identifier of the current thread
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Each thread processes its own block of columns
{
for (int j = y; j > 0; j--) //Here we walk over the rows
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//If the current difference exceeds delta, record the y-coordinate
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
} |
dd2ba7ff1a0e2a250620ee882569bd49d47c9ca3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <iostream>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#include "FDTD3dGPU.h"
#include "shrUtils.h"
__global__ void finite_difference(
float*__restrict__ output,
const float*__restrict__ input,
const float*__restrict__ coef,
const int dimx, const int dimy, const int dimz,
const int padding)
{
bool valid = true;
const int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
const int gtidy = blockIdx.y * blockDim.y + threadIdx.y;
const int ltidx = threadIdx.x;
const int ltidy = threadIdx.y;
const int workx = blockDim.x;
const int worky = blockDim.y;
const int stride_y = dimx + 2 * k_radius_default;
const int stride_z = stride_y * (dimy + 2 * k_radius_default);
__shared__ float tile[k_blockDimMaxY + 2 * k_radius_default][k_blockDimMaxX + 2 * k_radius_default];
int inputIndex = 0;
int outputIndex = 0;
// Advance inputIndex to start of inner volume
inputIndex += k_radius_default * stride_y + k_radius_default + padding;
// Advance inputIndex to target element
inputIndex += gtidy * stride_y + gtidx;
float infront[k_radius_default];
float behind[k_radius_default];
float current;
const int tx = ltidx + k_radius_default;
const int ty = ltidy + k_radius_default;
if (gtidx >= dimx) valid = false;
if (gtidy >= dimy) valid = false;
// For simplicity we assume that the global size is equal to the actual
// problem size; since the global size must be a multiple of the local size
// this means the problem size must be a multiple of the local size (or
// padded to meet this constraint).
// Preload the "infront" and "behind" data
for (int i = k_radius_default - 2 ; i >= 0 ; i--)
{
behind[i] = input[inputIndex];
inputIndex += stride_z;
}
current = input[inputIndex];
outputIndex = inputIndex;
inputIndex += stride_z;
for (int i = 0 ; i < k_radius_default ; i++)
{
infront[i] = input[inputIndex];
inputIndex += stride_z;
}
// Step through the xy-planes
for (int iz = 0 ; iz < dimz ; iz++)
{
// Advance the slice (move the thread-front)
for (int i = k_radius_default - 1 ; i > 0 ; i--)
behind[i] = behind[i - 1];
behind[0] = current;
current = infront[0];
for (int i = 0 ; i < k_radius_default - 1 ; i++)
infront[i] = infront[i + 1];
infront[k_radius_default - 1] = input[inputIndex];
inputIndex += stride_z;
outputIndex += stride_z;
__syncthreads();
// Note that for the work items on the boundary of the problem, the
// supplied index when reading the halo (below) may wrap to the
// previous/next row or even the previous/next xy-plane. This is
// acceptable since a) we disable the output write for these work
// items and b) there is at least one xy-plane before/after the
// current plane, so the access will be within bounds.
// Update the data slice in the local tile
// Halo above & below
if (ltidy < k_radius_default)
{
tile[ltidy][tx] = input[outputIndex - k_radius_default * stride_y];
tile[ltidy + worky + k_radius_default][tx] = input[outputIndex + worky * stride_y];
}
// Halo left & right
if (ltidx < k_radius_default)
{
tile[ty][ltidx] = input[outputIndex - k_radius_default];
tile[ty][ltidx + workx + k_radius_default] = input[outputIndex + workx];
}
tile[ty][tx] = current;
__syncthreads();
// Compute the output value
float value = coef[0] * current;
for (int i = 1 ; i <= k_radius_default ; i++)
{
value += coef[i] * (infront[i-1] + behind[i-1] + tile[ty - i][tx] +
tile[ty + i][tx] + tile[ty][tx - i] + tile[ty][tx + i]);
}
// Store the output value
if (valid) output[outputIndex] = value;
}
}
bool fdtdGPU(float *output, const float *input, const float *coeff,
const int dimx, const int dimy, const int dimz, const int radius,
const int timesteps, const int argc, const char **argv)
{
bool ok = true;
const int outerDimx = dimx + 2 * radius;
const int outerDimy = dimy + 2 * radius;
const int outerDimz = dimz + 2 * radius;
const size_t volumeSize = outerDimx * outerDimy * outerDimz;
size_t gridSize[2];
size_t blockSize[2];
// Ensure that the inner data starts on a 128B boundary
const int padding = (128 / sizeof(float)) - radius;
const size_t paddedVolumeSize = volumeSize + padding;
// Create memory buffer objects
float* bufferOut;
hipMalloc((void**)&bufferOut, paddedVolumeSize * sizeof(float));
float* bufferIn;
hipMalloc((void**)&bufferIn, paddedVolumeSize * sizeof(float));
float* bufferCoef;
hipMalloc((void**)&bufferCoef, (radius+1) * sizeof(float));
hipMemcpy(bufferCoef, coeff, (radius+1) * sizeof(float), hipMemcpyHostToDevice);
// Set the maximum work group size
size_t maxWorkSize = 256;
// Set the work group size
blockSize[0] = k_localWorkX;
blockSize[1] = maxWorkSize / k_localWorkX;
gridSize[0] = (unsigned int)ceil((float)dimx / blockSize[0]);
gridSize[1] = (unsigned int)ceil((float)dimy / blockSize[1]);
shrLog(" set block size to %dx%d\n", blockSize[0], blockSize[1]);
shrLog(" set grid size to %dx%d\n", gridSize[0], gridSize[1]);
dim3 grid (gridSize[0], gridSize[1]);
dim3 block (blockSize[0], blockSize[1]);
// Copy the input to the device input buffer
// offset = padding * 4, bytes = volumeSize * 4
hipMemcpy(bufferIn + padding, input, volumeSize * sizeof(float), hipMemcpyHostToDevice);
// Copy the input to the device output buffer (actually only need the halo)
hipMemcpy(bufferOut + padding, input, volumeSize * sizeof(float), hipMemcpyHostToDevice);
// Execute the FDTD
shrLog(" GPU FDTD loop\n");
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int it = 0 ; it < timesteps ; it++)
{
// Launch the kernel
hipLaunchKernelGGL(( finite_difference), dim3(grid), dim3(block), 0, 0, bufferOut, bufferIn, bufferCoef, dimx, dimy, dimz, padding);
// Toggle the buffers
float* tmp = bufferIn;
bufferIn = bufferOut;
bufferOut = tmp;
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / timesteps);
// Read the result back, result is in bufferSrc (after final toggle)
hipMemcpy(output, bufferIn + padding, volumeSize * sizeof(float), hipMemcpyDeviceToHost);
hipFree(bufferIn);
hipFree(bufferOut);
hipFree(bufferCoef);
return ok;
}
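// A minimal host-side sketch of how fdtdGPU might be invoked. It assumes k_radius_default
// and k_localWorkX come from FDTD3dGPU.h, that the kernel is compiled for that radius, and
// that dimx/dimy are multiples of the work-group size (see the kernel comment on padding);
// the helper name and coefficient values are illustrative only.
static bool exampleFdtdRun(int dimx, int dimy, int dimz, int timesteps)
{
  const int radius = k_radius_default;   // the kernel above is hard-wired to this radius
  const size_t outer = (size_t)(dimx + 2 * radius) * (dimy + 2 * radius) * (dimz + 2 * radius);
  float *input  = new float[outer]();    // zero-initialized source volume (including halo)
  float *output = new float[outer]();
  float *coeff  = new float[radius + 1];
  for (int i = 0; i <= radius; i++) coeff[i] = 0.1f; // illustrative stencil coefficients
  bool ok = fdtdGPU(output, input, coeff, dimx, dimy, dimz, radius, timesteps, 0, NULL);
  delete[] input; delete[] output; delete[] coeff;
  return ok;
}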
| dd2ba7ff1a0e2a250620ee882569bd49d47c9ca3.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <iostream>
#include <algorithm>
#include <chrono>
#include <cuda.h>
#include "FDTD3dGPU.h"
#include "shrUtils.h"
__global__ void finite_difference(
float*__restrict__ output,
const float*__restrict__ input,
const float*__restrict__ coef,
const int dimx, const int dimy, const int dimz,
const int padding)
{
bool valid = true;
const int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
const int gtidy = blockIdx.y * blockDim.y + threadIdx.y;
const int ltidx = threadIdx.x;
const int ltidy = threadIdx.y;
const int workx = blockDim.x;
const int worky = blockDim.y;
const int stride_y = dimx + 2 * k_radius_default;
const int stride_z = stride_y * (dimy + 2 * k_radius_default);
__shared__ float tile[k_blockDimMaxY + 2 * k_radius_default][k_blockDimMaxX + 2 * k_radius_default];
int inputIndex = 0;
int outputIndex = 0;
// Advance inputIndex to start of inner volume
inputIndex += k_radius_default * stride_y + k_radius_default + padding;
// Advance inputIndex to target element
inputIndex += gtidy * stride_y + gtidx;
float infront[k_radius_default];
float behind[k_radius_default];
float current;
const int tx = ltidx + k_radius_default;
const int ty = ltidy + k_radius_default;
if (gtidx >= dimx) valid = false;
if (gtidy >= dimy) valid = false;
// For simplicity we assume that the global size is equal to the actual
// problem size; since the global size must be a multiple of the local size
// this means the problem size must be a multiple of the local size (or
// padded to meet this constraint).
// Preload the "infront" and "behind" data
for (int i = k_radius_default - 2 ; i >= 0 ; i--)
{
behind[i] = input[inputIndex];
inputIndex += stride_z;
}
current = input[inputIndex];
outputIndex = inputIndex;
inputIndex += stride_z;
for (int i = 0 ; i < k_radius_default ; i++)
{
infront[i] = input[inputIndex];
inputIndex += stride_z;
}
// Step through the xy-planes
for (int iz = 0 ; iz < dimz ; iz++)
{
// Advance the slice (move the thread-front)
for (int i = k_radius_default - 1 ; i > 0 ; i--)
behind[i] = behind[i - 1];
behind[0] = current;
current = infront[0];
for (int i = 0 ; i < k_radius_default - 1 ; i++)
infront[i] = infront[i + 1];
infront[k_radius_default - 1] = input[inputIndex];
inputIndex += stride_z;
outputIndex += stride_z;
__syncthreads();
// Note that for the work items on the boundary of the problem, the
// supplied index when reading the halo (below) may wrap to the
// previous/next row or even the previous/next xy-plane. This is
// acceptable since a) we disable the output write for these work
// items and b) there is at least one xy-plane before/after the
// current plane, so the access will be within bounds.
// Update the data slice in the local tile
// Halo above & below
if (ltidy < k_radius_default)
{
tile[ltidy][tx] = input[outputIndex - k_radius_default * stride_y];
tile[ltidy + worky + k_radius_default][tx] = input[outputIndex + worky * stride_y];
}
// Halo left & right
if (ltidx < k_radius_default)
{
tile[ty][ltidx] = input[outputIndex - k_radius_default];
tile[ty][ltidx + workx + k_radius_default] = input[outputIndex + workx];
}
tile[ty][tx] = current;
__syncthreads();
// Compute the output value
float value = coef[0] * current;
for (int i = 1 ; i <= k_radius_default ; i++)
{
value += coef[i] * (infront[i-1] + behind[i-1] + tile[ty - i][tx] +
tile[ty + i][tx] + tile[ty][tx - i] + tile[ty][tx + i]);
}
// Store the output value
if (valid) output[outputIndex] = value;
}
}
bool fdtdGPU(float *output, const float *input, const float *coeff,
const int dimx, const int dimy, const int dimz, const int radius,
const int timesteps, const int argc, const char **argv)
{
bool ok = true;
const int outerDimx = dimx + 2 * radius;
const int outerDimy = dimy + 2 * radius;
const int outerDimz = dimz + 2 * radius;
const size_t volumeSize = outerDimx * outerDimy * outerDimz;
size_t gridSize[2];
size_t blockSize[2];
// Ensure that the inner data starts on a 128B boundary
const int padding = (128 / sizeof(float)) - radius;
const size_t paddedVolumeSize = volumeSize + padding;
// Create memory buffer objects
float* bufferOut;
cudaMalloc((void**)&bufferOut, paddedVolumeSize * sizeof(float));
float* bufferIn;
cudaMalloc((void**)&bufferIn, paddedVolumeSize * sizeof(float));
float* bufferCoef;
cudaMalloc((void**)&bufferCoef, (radius+1) * sizeof(float));
cudaMemcpy(bufferCoef, coeff, (radius+1) * sizeof(float), cudaMemcpyHostToDevice);
// Set the maximum work group size
size_t maxWorkSize = 256;
// Set the work group size
blockSize[0] = k_localWorkX;
blockSize[1] = maxWorkSize / k_localWorkX;
gridSize[0] = (unsigned int)ceil((float)dimx / blockSize[0]);
gridSize[1] = (unsigned int)ceil((float)dimy / blockSize[1]);
shrLog(" set block size to %dx%d\n", blockSize[0], blockSize[1]);
shrLog(" set grid size to %dx%d\n", gridSize[0], gridSize[1]);
dim3 grid (gridSize[0], gridSize[1]);
dim3 block (blockSize[0], blockSize[1]);
// Copy the input to the device input buffer
// offset = padding * 4, bytes = volumeSize * 4
cudaMemcpy(bufferIn + padding, input, volumeSize * sizeof(float), cudaMemcpyHostToDevice);
// Copy the input to the device output buffer (actually only need the halo)
cudaMemcpy(bufferOut + padding, input, volumeSize * sizeof(float), cudaMemcpyHostToDevice);
// Execute the FDTD
shrLog(" GPU FDTD loop\n");
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int it = 0 ; it < timesteps ; it++)
{
// Launch the kernel
finite_difference<<<grid, block>>>(bufferOut, bufferIn, bufferCoef, dimx, dimy, dimz, padding);
// Toggle the buffers
float* tmp = bufferIn;
bufferIn = bufferOut;
bufferOut = tmp;
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / timesteps);
// Read the result back, result is in bufferSrc (after final toggle)
cudaMemcpy(output, bufferIn + padding, volumeSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(bufferIn);
cudaFree(bufferOut);
cudaFree(bufferCoef);
return ok;
}
|
7c2d2e9d8af2f9c0e08e1b1cf24734659ad87357.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <read_gauge.h>
#include <gauge_field.h>
#include <fermion_force_quda.h>
#include <force_common.h>
#include <hw_quda.h>
namespace quda {
#define BLOCK_DIM 64
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, Vh)
#define LOAD_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \
Float2* hw = (oddness)?hw_odd:hw_even; \
var##0 = hw[idx + 0*Vh]; \
var##1 = hw[idx + 1*Vh]; \
var##2 = hw[idx + 2*Vh]; \
var##3 = hw[idx + 3*Vh]; \
var##4 = hw[idx + 4*Vh]; \
var##5 = hw[idx + 5*Vh]; \
}while(0)
#define WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \
Float2* hw = (oddness)?hw_odd:hw_even; \
hw[idx + 0*Vh] = var##0; \
hw[idx + 1*Vh] = var##1; \
hw[idx + 2*Vh] = var##2; \
hw[idx + 3*Vh] = var##3; \
hw[idx + 4*Vh] = var##4; \
hw[idx + 5*Vh] = var##5; \
}while(0)
#define LOAD_HW(hw_eve, hw_odd, idx, var, oddness) LOAD_HW_SINGLE(hw_eve, hw_odd, idx, var, oddness)
#define WRITE_HW(hw_even, hw_odd, idx, var, oddness) WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness)
#define LOAD_MATRIX(src, dir, idx, var) LOAD_MATRIX_12_SINGLE(src, dir, idx, var, Vh)
#define FF_SITE_MATRIX_LOAD_TEX 1
#define linkEvenTex siteLink0TexSingle_recon
#define linkOddTex siteLink1TexSingle_recon
#if (FF_SITE_MATRIX_LOAD_TEX == 1)
#define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE_TEX(((oddness)?linkOddTex:linkEvenTex), dir, idx, var, Vh)
#else
#define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE(((oddness)?linkOdd:linkEven), dir, idx, var, Vh)
#endif
#define linka00_re LINKA0.x
#define linka00_im LINKA0.y
#define linka01_re LINKA0.z
#define linka01_im LINKA0.w
#define linka02_re LINKA1.x
#define linka02_im LINKA1.y
#define linka10_re LINKA1.z
#define linka10_im LINKA1.w
#define linka11_re LINKA2.x
#define linka11_im LINKA2.y
#define linka12_re LINKA2.z
#define linka12_im LINKA2.w
#define linka20_re LINKA3.x
#define linka20_im LINKA3.y
#define linka21_re LINKA3.z
#define linka21_im LINKA3.w
#define linka22_re LINKA4.x
#define linka22_im LINKA4.y
#define linkb00_re LINKB0.x
#define linkb00_im LINKB0.y
#define linkb01_re LINKB0.z
#define linkb01_im LINKB0.w
#define linkb02_re LINKB1.x
#define linkb02_im LINKB1.y
#define linkb10_re LINKB1.z
#define linkb10_im LINKB1.w
#define linkb11_re LINKB2.x
#define linkb11_im LINKB2.y
#define linkb12_re LINKB2.z
#define linkb12_im LINKB2.w
#define linkb20_re LINKB3.x
#define linkb20_im LINKB3.y
#define linkb21_re LINKB3.z
#define linkb21_im LINKB3.w
#define linkb22_re LINKB4.x
#define linkb22_im LINKB4.y
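// MAT_MUL_HW(M, HW, HWOUT): multiply both color vectors of the half-Wilson vector HW by the
// 3x3 color matrix M (HWOUT_a = M * HW_a for a = 0,1).
// ADJ_MAT_MUL_HW applies the adjoint instead (HWOUT_a = M^dagger * HW_a).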
#define MAT_MUL_HW(M, HW, HWOUT) \
HWOUT##00_re = (M##00_re * HW##00_re - M##00_im * HW##00_im) \
+ (M##01_re * HW##01_re - M##01_im * HW##01_im) \
+ (M##02_re * HW##02_re - M##02_im * HW##02_im); \
HWOUT##00_im = (M##00_re * HW##00_im + M##00_im * HW##00_re) \
+ (M##01_re * HW##01_im + M##01_im * HW##01_re) \
+ (M##02_re * HW##02_im + M##02_im * HW##02_re); \
HWOUT##01_re = (M##10_re * HW##00_re - M##10_im * HW##00_im) \
+ (M##11_re * HW##01_re - M##11_im * HW##01_im) \
+ (M##12_re * HW##02_re - M##12_im * HW##02_im); \
HWOUT##01_im = (M##10_re * HW##00_im + M##10_im * HW##00_re) \
+ (M##11_re * HW##01_im + M##11_im * HW##01_re) \
+ (M##12_re * HW##02_im + M##12_im * HW##02_re); \
HWOUT##02_re = (M##20_re * HW##00_re - M##20_im * HW##00_im) \
+ (M##21_re * HW##01_re - M##21_im * HW##01_im) \
+ (M##22_re * HW##02_re - M##22_im * HW##02_im); \
HWOUT##02_im = (M##20_re * HW##00_im + M##20_im * HW##00_re) \
+ (M##21_re * HW##01_im + M##21_im * HW##01_re) \
+ (M##22_re * HW##02_im + M##22_im * HW##02_re); \
HWOUT##10_re = (M##00_re * HW##10_re - M##00_im * HW##10_im) \
+ (M##01_re * HW##11_re - M##01_im * HW##11_im) \
+ (M##02_re * HW##12_re - M##02_im * HW##12_im); \
HWOUT##10_im = (M##00_re * HW##10_im + M##00_im * HW##10_re) \
+ (M##01_re * HW##11_im + M##01_im * HW##11_re) \
+ (M##02_re * HW##12_im + M##02_im * HW##12_re); \
HWOUT##11_re = (M##10_re * HW##10_re - M##10_im * HW##10_im) \
+ (M##11_re * HW##11_re - M##11_im * HW##11_im) \
+ (M##12_re * HW##12_re - M##12_im * HW##12_im); \
HWOUT##11_im = (M##10_re * HW##10_im + M##10_im * HW##10_re) \
+ (M##11_re * HW##11_im + M##11_im * HW##11_re) \
+ (M##12_re * HW##12_im + M##12_im * HW##12_re); \
HWOUT##12_re = (M##20_re * HW##10_re - M##20_im * HW##10_im) \
+ (M##21_re * HW##11_re - M##21_im * HW##11_im) \
+ (M##22_re * HW##12_re - M##22_im * HW##12_im); \
HWOUT##12_im = (M##20_re * HW##10_im + M##20_im * HW##10_re) \
+ (M##21_re * HW##11_im + M##21_im * HW##11_re) \
+ (M##22_re * HW##12_im + M##22_im * HW##12_re);
#define ADJ_MAT_MUL_HW(M, HW, HWOUT) \
HWOUT##00_re = (M##00_re * HW##00_re + M##00_im * HW##00_im) \
+ (M##10_re * HW##01_re + M##10_im * HW##01_im) \
+ (M##20_re * HW##02_re + M##20_im * HW##02_im); \
HWOUT##00_im = (M##00_re * HW##00_im - M##00_im * HW##00_re) \
+ (M##10_re * HW##01_im - M##10_im * HW##01_re) \
+ (M##20_re * HW##02_im - M##20_im * HW##02_re); \
HWOUT##01_re = (M##01_re * HW##00_re + M##01_im * HW##00_im) \
+ (M##11_re * HW##01_re + M##11_im * HW##01_im) \
+ (M##21_re * HW##02_re + M##21_im * HW##02_im); \
HWOUT##01_im = (M##01_re * HW##00_im - M##01_im * HW##00_re) \
+ (M##11_re * HW##01_im - M##11_im * HW##01_re) \
+ (M##21_re * HW##02_im - M##21_im * HW##02_re); \
HWOUT##02_re = (M##02_re * HW##00_re + M##02_im * HW##00_im) \
+ (M##12_re * HW##01_re + M##12_im * HW##01_im) \
+ (M##22_re * HW##02_re + M##22_im * HW##02_im); \
HWOUT##02_im = (M##02_re * HW##00_im - M##02_im * HW##00_re) \
+ (M##12_re * HW##01_im - M##12_im * HW##01_re) \
+ (M##22_re * HW##02_im - M##22_im * HW##02_re); \
HWOUT##10_re = (M##00_re * HW##10_re + M##00_im * HW##10_im) \
+ (M##10_re * HW##11_re + M##10_im * HW##11_im) \
+ (M##20_re * HW##12_re + M##20_im * HW##12_im); \
HWOUT##10_im = (M##00_re * HW##10_im - M##00_im * HW##10_re) \
+ (M##10_re * HW##11_im - M##10_im * HW##11_re) \
+ (M##20_re * HW##12_im - M##20_im * HW##12_re); \
HWOUT##11_re = (M##01_re * HW##10_re + M##01_im * HW##10_im) \
+ (M##11_re * HW##11_re + M##11_im * HW##11_im) \
+ (M##21_re * HW##12_re + M##21_im * HW##12_im); \
HWOUT##11_im = (M##01_re * HW##10_im - M##01_im * HW##10_re) \
+ (M##11_re * HW##11_im - M##11_im * HW##11_re) \
+ (M##21_re * HW##12_im - M##21_im * HW##12_re); \
HWOUT##12_re = (M##02_re * HW##10_re + M##02_im * HW##10_im) \
+ (M##12_re * HW##11_re + M##12_im * HW##11_im) \
+ (M##22_re * HW##12_re + M##22_im * HW##12_im); \
HWOUT##12_im = (M##02_re * HW##10_im - M##02_im * HW##10_re) \
+ (M##12_re * HW##11_im - M##12_im * HW##11_re) \
+ (M##22_re * HW##12_im - M##22_im * HW##12_re);
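// SU3_PROJECTOR(va, vb, m): outer product of two color vectors, m_ij = va_i * conj(vb_j),
// i.e. m = va vb^dagger.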
#define SU3_PROJECTOR(va, vb, m) \
m##00_re = va##0_re * vb##0_re + va##0_im * vb##0_im; \
m##00_im = va##0_im * vb##0_re - va##0_re * vb##0_im; \
m##01_re = va##0_re * vb##1_re + va##0_im * vb##1_im; \
m##01_im = va##0_im * vb##1_re - va##0_re * vb##1_im; \
m##02_re = va##0_re * vb##2_re + va##0_im * vb##2_im; \
m##02_im = va##0_im * vb##2_re - va##0_re * vb##2_im; \
m##10_re = va##1_re * vb##0_re + va##1_im * vb##0_im; \
m##10_im = va##1_im * vb##0_re - va##1_re * vb##0_im; \
m##11_re = va##1_re * vb##1_re + va##1_im * vb##1_im; \
m##11_im = va##1_im * vb##1_re - va##1_re * vb##1_im; \
m##12_re = va##1_re * vb##2_re + va##1_im * vb##2_im; \
m##12_im = va##1_im * vb##2_re - va##1_re * vb##2_im; \
m##20_re = va##2_re * vb##0_re + va##2_im * vb##0_im; \
m##20_im = va##2_im * vb##0_re - va##2_re * vb##0_im; \
m##21_re = va##2_re * vb##1_re + va##2_im * vb##1_im; \
m##21_im = va##2_im * vb##1_re - va##2_re * vb##1_im; \
m##22_re = va##2_re * vb##2_re + va##2_im * vb##2_im; \
m##22_im = va##2_im * vb##2_re - va##2_re * vb##2_im;
//vc = va + vb*s
#define SCALAR_MULT_ADD_SU3_VECTOR(va, vb, s, vc) do { \
vc##0_re = va##0_re + vb##0_re * s; \
vc##0_im = va##0_im + vb##0_im * s; \
vc##1_re = va##1_re + vb##1_re * s; \
vc##1_im = va##1_im + vb##1_im * s; \
vc##2_re = va##2_re + vb##2_re * s; \
vc##2_im = va##2_im + vb##2_im * s; \
}while (0)
#define FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \
new_x1 = (new_x1==X1m1)?0:new_x1+1; \
break; \
case 1: \
new_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \
new_x2 = (new_x2==X2m1)?0:new_x2+1; \
break; \
case 2: \
new_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \
new_x3 = (new_x3==X3m1)?0:new_x3+1; \
break; \
case 3: \
new_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \
new_x4 = (new_x4==X4m1)?0:new_x4+1; \
break; \
} \
}while(0)
#define FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (new_x1==0)?idx+X1m1:idx-1); \
new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \
break; \
case 1: \
new_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \
new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \
break; \
case 2: \
new_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \
new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \
break; \
case 3: \
new_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \
new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \
break; \
} \
}while(0)
#define FF_COMPUTE_NEW_FULL_IDX_PLUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (old_x1==X1m1)?idx-X1m1:idx+1); \
break; \
case 1: \
new_idx = ( (old_x2==X2m1)?idx-X2X1mX1:idx+X1); \
break; \
case 2: \
new_idx = ( (old_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \
break; \
case 3: \
new_idx = ( (old_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \
break; \
} \
}while(0)
#define FF_COMPUTE_NEW_FULL_IDX_MINUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (old_x1==0)?idx+X1m1:idx-1); \
break; \
case 1: \
new_idx = ( (old_x2==0)?idx+X2X1mX1:idx-X1); \
break; \
case 2: \
new_idx = ( (old_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \
break; \
case 3: \
new_idx = ( (old_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \
break; \
} \
}while(0)
//this macro requires the linka, linkb, and ah variables to be defined
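// ADD_FORCE_TO_MOM(hw1, hw2, idx, dir, cf, oddness) accumulates
//   cf.x * hw1_0 hw2_0^dagger + cf.y * hw1_1 hw2_1^dagger
// into the anti-hermitian momentum at site idx in direction dir; the coefficient sign is
// flipped for backwards directions and flipped again for odd-parity sites.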
#define ADD_FORCE_TO_MOM(hw1, hw2, idx, dir, cf,oddness) do{ \
Float2 my_coeff; \
int mydir; \
if (GOES_BACKWARDS(dir)){ \
mydir=OPP_DIR(dir); \
my_coeff.x = -cf.x; \
my_coeff.y = -cf.y; \
}else{ \
mydir=dir; \
my_coeff.x = cf.x; \
my_coeff.y = cf.y; \
} \
Float2 tmp_coeff; \
tmp_coeff.x = my_coeff.x; \
tmp_coeff.y = my_coeff.y; \
if(oddness){ \
tmp_coeff.x = - my_coeff.x; \
tmp_coeff.y = - my_coeff.y; \
} \
Float2* mom = oddness?momOdd:momEven; \
LOAD_ANTI_HERMITIAN(mom, mydir, idx, AH); \
UNCOMPRESS_ANTI_HERMITIAN(ah, linka); \
SU3_PROJECTOR(hw1##0, hw2##0, linkb); \
SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.x, linka); \
SU3_PROJECTOR(hw1##1, hw2##1, linkb); \
SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.y, linka); \
MAKE_ANTI_HERMITIAN(linka, ah); \
WRITE_ANTI_HERMITIAN(mom, mydir, idx, AH, Vh); \
}while(0)
#define FF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \
sign =1; \
switch(dir){ \
case XUP: \
if ( (i4 & 1) == 1){ \
sign = -1; \
} \
break; \
case YUP: \
if ( ((i4+i1) & 1) == 1){ \
sign = -1; \
} \
break; \
case ZUP: \
if ( ((i4+i1+i2) & 1) == 1){ \
sign = -1; \
} \
break; \
case TUP: \
if (i4 == X4m1 ){ \
sign = -1; \
} \
break; \
} \
}while (0)
#define hwa00_re HWA0.x
#define hwa00_im HWA0.y
#define hwa01_re HWA1.x
#define hwa01_im HWA1.y
#define hwa02_re HWA2.x
#define hwa02_im HWA2.y
#define hwa10_re HWA3.x
#define hwa10_im HWA3.y
#define hwa11_re HWA4.x
#define hwa11_im HWA4.y
#define hwa12_re HWA5.x
#define hwa12_im HWA5.y
#define hwb00_re HWB0.x
#define hwb00_im HWB0.y
#define hwb01_re HWB1.x
#define hwb01_im HWB1.y
#define hwb02_re HWB2.x
#define hwb02_im HWB2.y
#define hwb10_re HWB3.x
#define hwb10_im HWB3.y
#define hwb11_re HWB4.x
#define hwb11_im HWB4.y
#define hwb12_re HWB5.x
#define hwb12_im HWB5.y
#define hwc00_re HWC0.x
#define hwc00_im HWC0.y
#define hwc01_re HWC1.x
#define hwc01_im HWC1.y
#define hwc02_re HWC2.x
#define hwc02_im HWC2.y
#define hwc10_re HWC3.x
#define hwc10_im HWC3.y
#define hwc11_re HWC4.x
#define hwc11_im HWC4.y
#define hwc12_re HWC5.x
#define hwc12_im HWC5.y
#define hwd00_re HWD0.x
#define hwd00_im HWD0.y
#define hwd01_re HWD1.x
#define hwd01_im HWD1.y
#define hwd02_re HWD2.x
#define hwd02_im HWD2.y
#define hwd10_re HWD3.x
#define hwd10_im HWD3.y
#define hwd11_re HWD4.x
#define hwd11_im HWD4.y
#define hwd12_re HWD5.x
#define hwd12_im HWD5.y
#define hwe00_re HWE0.x
#define hwe00_im HWE0.y
#define hwe01_re HWE1.x
#define hwe01_im HWE1.y
#define hwe02_re HWE2.x
#define hwe02_im HWE2.y
#define hwe10_re HWE3.x
#define hwe10_im HWE3.y
#define hwe11_re HWE4.x
#define hwe11_im HWE4.y
#define hwe12_re HWE5.x
#define hwe12_im HWE5.y
void fermion_force_init_cuda(QudaGaugeParam* param)
{
#ifdef MULTI_GPU
#error "multi gpu is not supported for fermion force computation"
#endif
static int fermion_force_init_cuda_flag = 0;
if (fermion_force_init_cuda_flag) return;
fermion_force_init_cuda_flag=1;
}
/*
* This function computes the contribution to momentum from the middle link in a staple
*
* tempx: IN
* Pmu: OUT
* P3: OUT
*
*/
template<int sig_positive, int mu_positive, int oddBit, typename Float2>
__global__ void
do_middle_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
int sig, int mu, Float2 coeff,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int new_x1, new_x2, new_x3, new_x4;
int new_mem_idx;
int ad_link_sign=1;
int ab_link_sign=1;
int bc_link_sign=1;
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
Float2 AH0, AH1, AH2, AH3, AH4;
/* sig
* A________B
* mu | |
* D | |C
*
* A is the current point (sid)
*/
int point_b, point_c, point_d;
int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx;
int mymu;
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(mu_positive){
mymu =mu;
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx);
}else{
mymu = OPP_DIR(mu);
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx);
}
point_d = (new_mem_idx >> 1);
if (mu_positive){
ad_link_nbr_idx = point_d;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}else{
ad_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4);
}
int mysig;
if(sig_positive){
mysig = sig;
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx);
}else{
mysig = OPP_DIR(sig);
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx);
}
point_c = (new_mem_idx >> 1);
if (mu_positive){
bc_link_nbr_idx = point_c;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(sig_positive){
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx);
}else{
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx);
}
point_b = (new_mem_idx >> 1);
if (!mu_positive){
bc_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
if(sig_positive){
ab_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4);
}else{
ab_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4);
}
LOAD_HW(tempxEven, tempxOdd, point_d, HWA, 1-oddBit );
if(mu_positive){
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1-oddBit);
}else{
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit);
}
RECONSTRUCT_LINK_12(ad_link_sign, linka);
if (mu_positive){
ADJ_MAT_MUL_HW(linka, hwa, hwd);
}else{
MAT_MUL_HW(linka, hwa, hwd);
}
WRITE_HW(PmuEven,PmuOdd, sid, HWD, oddBit);
LOAD_HW(tempxEven,tempxOdd, point_c, HWA, oddBit);
if(mu_positive){
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit);
}else{
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit);
}
RECONSTRUCT_LINK_12(bc_link_sign, linka);
if (mu_positive){
ADJ_MAT_MUL_HW(linka, hwa, hwb);
}else{
MAT_MUL_HW(linka, hwa, hwb);
}
if(sig_positive){
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, oddBit);
}else{
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, 1-oddBit);
}
RECONSTRUCT_LINK_12(ab_link_sign, linkb);
if (sig_positive){
MAT_MUL_HW(linkb, hwb, hwc);
}else{
ADJ_MAT_MUL_HW(linkb, hwb, hwc);
}
WRITE_HW(P3Even, P3Odd, sid, HWC, oddBit);
if (sig_positive){
//add the force to mom
ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, coeff, oddBit);
}
}
template<typename Float2>
static void
middle_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
int sig, int mu, Float2 coeff,
float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 BlockDim)
{
dim3 halfGridDim(gridDim.x/2, 1,1);
#define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \
hipLaunchKernelGGL(( do_middle_link_kernel<sig_sign, mu_sign,0>), dim3(halfGridDim), dim3(BlockDim), 0, 0, tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
sig, mu, coeff, \
linkEven, linkOdd, \
momEven, momOdd); \
hipLaunchKernelGGL(( do_middle_link_kernel<sig_sign, mu_sign, 1>), dim3(halfGridDim), dim3(BlockDim), 0, 0, tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
sig, mu, coeff, \
linkEven, linkOdd, \
momEven, momOdd);
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1, 1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1, 0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(0, 1);
}else{
CALL_MIDDLE_LINK_KERNEL(0, 0);
}
#undef CALL_MIDDLE_LINK_KERNEL
}
/*
* Computes contribution to momentum from the side links in a staple
*
* P3: IN
* P3mu: not used
* Tempx: IN
* Pmu: IN
* shortPE: OUT
*
*/
template<int sig_positive, int mu_positive, int oddBit, typename Float2>
__global__ void
do_side_link_kernel(Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
Float2 mcoeff;
mcoeff.x = -coeff.x;
mcoeff.y = -coeff.y;
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int ad_link_sign = 1;
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
Float2 AH0, AH1, AH2, AH3, AH4;
/*
* compute the side link contribution to the momentum
*
*
* sig
* A________B
* | | mu
* D | |C
*
* A is the current point (sid)
*/
int point_d;
int ad_link_nbr_idx;
int mymu;
int new_mem_idx;
int new_x1 = x1;
int new_x2 = x2;
int new_x3 = x3;
int new_x4 = x4;
if(mu_positive){
mymu =mu;
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mymu,X, new_mem_idx);
}else{
mymu = OPP_DIR(mu);
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mymu, X, new_mem_idx);
}
point_d = (new_mem_idx >> 1);
if (mu_positive){
ad_link_nbr_idx = point_d;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}else{
ad_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4);
}
LOAD_HW(P3Even, P3Odd, sid, HWA, oddBit);
if(mu_positive){
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1 - oddBit);
}else{
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit);
}
RECONSTRUCT_LINK_12(ad_link_sign, linka);
if (mu_positive){
MAT_MUL_HW(linka, hwa, hwb);
}else{
ADJ_MAT_MUL_HW(linka, hwa, hwb);
}
//start to add side link force
if (mu_positive){
LOAD_HW(TempxEven, TempxOdd, point_d, HWC, 1-oddBit);
if (sig_positive){
ADD_FORCE_TO_MOM(hwb, hwc, point_d, mu, coeff, 1-oddBit);
}else{
ADD_FORCE_TO_MOM(hwc, hwb, point_d, OPP_DIR(mu), mcoeff, 1- oddBit);
}
}else{
LOAD_HW(PmuEven, PmuOdd, sid, HWC, oddBit);
if (sig_positive){
ADD_FORCE_TO_MOM(hwa, hwc, sid, mu, mcoeff, oddBit);
}else{
ADD_FORCE_TO_MOM(hwc, hwa, sid, OPP_DIR(mu), coeff, oddBit);
}
}
if (shortPOdd){
LOAD_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit);
SCALAR_MULT_ADD_SU3_VECTOR(hwa0, hwb0, accumu_coeff.x, hwa0);
SCALAR_MULT_ADD_SU3_VECTOR(hwa1, hwb1, accumu_coeff.y, hwa1);
WRITE_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit);
}
}
template<typename Float2>
static void
side_link_kernel(Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 blockDim)
{
dim3 halfGridDim(gridDim.x/2,1,1);
#define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \
hipLaunchKernelGGL(( do_side_link_kernel<sig_sign,mu_sign,0>), dim3(halfGridDim), dim3(blockDim), 0, 0, P3Even, P3Odd, \
P3muEven, P3muOdd, \
TempxEven, TempxOdd, \
PmuEven, PmuOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd); \
hipLaunchKernelGGL(( do_side_link_kernel<sig_sign,mu_sign,1>), dim3(halfGridDim), dim3(blockDim), 0, 0, P3Even, P3Odd, \
P3muEven, P3muOdd, \
TempxEven, TempxOdd, \
PmuEven, PmuOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd);
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(0,1);
}else{
CALL_SIDE_LINK_KERNEL(0,0);
}
#undef CALL_SIDE_LINK_KERNEL
}
/*
* This function computes the contribution to momentum from middle and side links
*
* tempx: IN
* Pmu: not used
* P3: not used
* P3mu: not used
* shortP: OUT
*
*/
template<int sig_positive, int mu_positive, int oddBit, typename Float2>
__global__ void
do_all_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int new_x1, new_x2, new_x3, new_x4;
int ad_link_sign=1;
int ab_link_sign=1;
int bc_link_sign=1;
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5;
Float2 HWE0, HWE1, HWE2, HWE3, HWE4, HWE5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
float4 LINKC0, LINKC1, LINKC2, LINKC3, LINKC4;
Float2 AH0, AH1, AH2, AH3, AH4;
/* sig
* A________B
* mu | |
* D | |C
*
* A is the current point (sid)
*/
int point_b, point_c, point_d;
int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx;
int mymu;
int new_mem_idx;
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(mu_positive){
mymu =mu;
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx);
}else{
mymu = OPP_DIR(mu);
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx);
}
point_d = (new_mem_idx >> 1);
if (mu_positive){
ad_link_nbr_idx = point_d;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}else{
ad_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4);
}
int mysig;
if(sig_positive){
mysig = sig;
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx);
}else{
mysig = OPP_DIR(sig);
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx);
}
point_c = (new_mem_idx >> 1);
if (mu_positive){
bc_link_nbr_idx = point_c;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(sig_positive){
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx);
}else{
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx);
}
point_b = (new_mem_idx >> 1);
if (!mu_positive){
bc_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
if(sig_positive){
ab_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4);
}else{
ab_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4);
}
LOAD_HW(tempxEven, tempxOdd, point_d, HWE, 1-oddBit);
if (mu_positive){
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, 1-oddBit);
}else{
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, oddBit);
}
RECONSTRUCT_LINK_12(ad_link_sign, linkc);
if (mu_positive){
ADJ_MAT_MUL_HW(linkc, hwe, hwd);
}else{
MAT_MUL_HW(linkc, hwe, hwd);
}
//we do not need to write Pmu here
//WRITE_HW(myPmu, sid, HWD);
LOAD_HW(tempxEven, tempxOdd, point_c, HWA, oddBit);
if (mu_positive){
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit);
}else{
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit);
}
RECONSTRUCT_LINK_12(bc_link_sign, linka);
if (mu_positive){
ADJ_MAT_MUL_HW(linka, hwa, hwb);
}else{
MAT_MUL_HW(linka, hwa, hwb);
}
if (sig_positive){
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, oddBit);
}else{
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, 1-oddBit);
}
RECONSTRUCT_LINK_12(ab_link_sign, linka);
if (sig_positive){
MAT_MUL_HW(linka, hwb, hwc);
}else{
ADJ_MAT_MUL_HW(linka, hwb, hwc);
}
//we do not need to write P3 here
//WRITE_HW(myP3, sid, HWC);
//The middle link contribution
if (sig_positive){
//add the force to mom
ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, mcoeff, oddBit);
}
//P3 is hwc
//ad_link is linkc
if (mu_positive){
MAT_MUL_HW(linkc, hwc, hwa);
}else{
ADJ_MAT_MUL_HW(linkc, hwc, hwa);
}
//accumulate P7rho to P5
//WRITE_HW(otherP3mu, point_d, HWA);
LOAD_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit);
SCALAR_MULT_ADD_SU3_VECTOR(hwb0, hwa0, accumu_coeff.x, hwb0);
SCALAR_MULT_ADD_SU3_VECTOR(hwb1, hwa1, accumu_coeff.y, hwb1);
WRITE_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit);
//hwe holds tempx at point_d
//hwd holds Pmu at point A(sid)
if (mu_positive){
if (sig_positive){
ADD_FORCE_TO_MOM(hwa, hwe, point_d, mu, coeff, 1-oddBit);
}else{
ADD_FORCE_TO_MOM(hwe, hwa, point_d, OPP_DIR(mu), mcoeff, 1- oddBit);
}
}else{
if (sig_positive){
ADD_FORCE_TO_MOM(hwc, hwd, sid, mu, mcoeff, oddBit);
}else{
ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), coeff, oddBit);
}
}
}
template<typename Float2>
static void
all_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 blockDim)
{
dim3 halfGridDim(gridDim.x/2, 1,1);
#define CALL_ALL_LINK_KERNEL(sig_sign, mu_sign) \
hipLaunchKernelGGL(( do_all_link_kernel<sig_sign,mu_sign,0>), dim3(halfGridDim), dim3(blockDim), 0, 0, tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
P3muEven, P3muOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, mcoeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd); \
hipLaunchKernelGGL(( do_all_link_kernel<sig_sign,mu_sign,1>), dim3(halfGridDim), dim3(blockDim), 0, 0, tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
P3muEven, P3muOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, mcoeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd);
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_ALL_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(0,1);
}else{
CALL_ALL_LINK_KERNEL(0,0);
}
#undef CALL_ALL_LINK_KERNEL
}
/* This function computes the one and naik terms' contribution to momentum
*
* Tempx: IN
* Pmu: IN
* Pnumu: IN
*
*/
template <int oddBit, typename Float2>
__global__ void
do_one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* PnumuEven, Float2* PnumuOdd,
int mu, Float2 OneLink, Float2 Naik, Float2 mNaik,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
Float2 AH0, AH1, AH2, AH3, AH4;
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
//int X = 2*sid + x1odd;
int dx[4];
int new_x1, new_x2, new_x3, new_x4, new_idx;
int sign=1;
if (GOES_BACKWARDS(mu)){
//The one link
LOAD_HW(PmuEven, PmuOdd, sid, HWA, oddBit);
LOAD_HW(TempxEven, TempxOdd, sid, HWB, oddBit);
ADD_FORCE_TO_MOM(hwa, hwb, sid, OPP_DIR(mu), OneLink, oddBit);
//Naik term
dx[3]=dx[2]=dx[1]=dx[0]=0;
dx[OPP_DIR(mu)] = -1;
new_x1 = (x1 + dx[0] + X1)%X1;
new_x2 = (x2 + dx[1] + X2)%X2;
new_x3 = (x3 + dx[2] + X3)%X3;
new_x4 = (x4 + dx[3] + X4)%X4;
new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1;
LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit);
FF_LOAD_MATRIX(OPP_DIR(mu), new_idx, LINKA, 1-oddBit);
FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), new_x1,new_x2,new_x3,new_x4);
RECONSTRUCT_LINK_12(sign, linka);
ADJ_MAT_MUL_HW(linka, hwa, hwc); //Popmu
LOAD_HW(PnumuEven, PnumuOdd, sid, HWD, oddBit);
ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), mNaik, oddBit);
dx[3]=dx[2]=dx[1]=dx[0]=0;
dx[OPP_DIR(mu)] = 1;
new_x1 = (x1 + dx[0] + X1)%X1;
new_x2 = (x2 + dx[1] + X2)%X2;
new_x3 = (x3 + dx[2] + X3)%X3;
new_x4 = (x4 + dx[3] + X4)%X4;
new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1;
LOAD_HW(PnumuEven, PnumuOdd, new_idx, HWA, 1-oddBit);
FF_LOAD_MATRIX(OPP_DIR(mu), sid, LINKA, oddBit);
FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), x1, x2, x3, x4);
RECONSTRUCT_LINK_12(sign, linka);
MAT_MUL_HW(linka, hwa, hwc);
ADD_FORCE_TO_MOM(hwc, hwb, sid, OPP_DIR(mu), Naik, oddBit);
}else{
dx[3]=dx[2]=dx[1]=dx[0]=0;
dx[mu] = 1;
new_x1 = (x1 + dx[0] + X1)%X1;
new_x2 = (x2 + dx[1] + X2)%X2;
new_x3 = (x3 + dx[2] + X3)%X3;
new_x4 = (x4 + dx[3] + X4)%X4;
new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1;
LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit);
FF_LOAD_MATRIX(mu, sid, LINKA, oddBit);
FF_COMPUTE_RECONSTRUCT_SIGN(sign, mu, x1, x2, x3, x4);
RECONSTRUCT_LINK_12(sign, linka);
MAT_MUL_HW(linka, hwa, hwb);
LOAD_HW(PnumuEven, PnumuOdd, sid, HWC, oddBit);
ADD_FORCE_TO_MOM(hwb, hwc, sid, mu, Naik, oddBit);
}
}
template<typename Float2>
static void
one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* PnumuEven, Float2* PnumuOdd,
int mu, Float2 OneLink, Float2 Naik, Float2 mNaik,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 blockDim)
{
dim3 halfGridDim(gridDim.x/2, 1,1);
hipLaunchKernelGGL(( do_one_and_naik_terms_kernel<0>), dim3(halfGridDim), dim3(blockDim), 0, 0, TempxEven, TempxOdd,
PmuEven, PmuOdd,
PnumuEven, PnumuOdd,
mu, OneLink, Naik, mNaik,
linkEven, linkOdd,
momEven, momOdd);
hipLaunchKernelGGL(( do_one_and_naik_terms_kernel<1>), dim3(halfGridDim), dim3(blockDim), 0, 0, TempxEven, TempxOdd,
PmuEven, PmuOdd,
PnumuEven, PnumuOdd,
mu, OneLink, Naik, mNaik,
linkEven, linkOdd,
momEven, momOdd);
return;
}
#define Pmu tempvec[0]
#define Pnumu tempvec[1]
#define Prhonumu tempvec[2]
#define P7 tempvec[3]
#define P7rho tempvec[4]
#define P7rhonu tempvec[5]
#define P5 tempvec[6]
#define P3 tempvec[7]
#define P5nu tempvec[3]
#define P3mu tempvec[3]
#define Popmu tempvec[4]
#define Pmumumu tempvec[4]
template<typename Real>
static void
do_fermion_force_cuda(Real eps, Real weight1, Real weight2, Real* act_path_coeff, FullHw cudaHw,
cudaGaugeField &siteLink, cudaGaugeField &cudaMom, FullHw tempvec[8], QudaGaugeParam* param)
{
int mu, nu, rho, sig;
float2 coeff;
float2 OneLink, Lepage, Naik, FiveSt, ThreeSt, SevenSt;
float2 mNaik, mLepage, mFiveSt, mThreeSt, mSevenSt;
Real ferm_epsilon;
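// act_path_coeff[0..5] hold the one-link, Naik, 3-link, 5-link, 7-link and Lepage
// coefficients; below, each is scaled by 2*eps*weight1 into the .x component and by
// 2*eps*weight2 into the .y component.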
ferm_epsilon = 2.0*weight1*eps;
OneLink.x = act_path_coeff[0]*ferm_epsilon ;
Naik.x = act_path_coeff[1]*ferm_epsilon ; mNaik.x = -Naik.x;
ThreeSt.x = act_path_coeff[2]*ferm_epsilon ; mThreeSt.x = -ThreeSt.x;
FiveSt.x = act_path_coeff[3]*ferm_epsilon ; mFiveSt.x = -FiveSt.x;
SevenSt.x = act_path_coeff[4]*ferm_epsilon ; mSevenSt.x = -SevenSt.x;
Lepage.x = act_path_coeff[5]*ferm_epsilon ; mLepage.x = -Lepage.x;
ferm_epsilon = 2.0*weight2*eps;
OneLink.y = act_path_coeff[0]*ferm_epsilon ;
Naik.y = act_path_coeff[1]*ferm_epsilon ; mNaik.y = -Naik.y;
ThreeSt.y = act_path_coeff[2]*ferm_epsilon ; mThreeSt.y = -ThreeSt.y;
FiveSt.y = act_path_coeff[3]*ferm_epsilon ; mFiveSt.y = -FiveSt.y;
SevenSt.y = act_path_coeff[4]*ferm_epsilon ; mSevenSt.y = -SevenSt.y;
Lepage.y = act_path_coeff[5]*ferm_epsilon ; mLepage.y = -Lepage.y;
int DirectLinks[8] ;
for(mu=0;mu<8;mu++){
DirectLinks[mu] = 0 ;
}
int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3];
dim3 blockDim(BLOCK_DIM,1,1);
dim3 gridDim(volume/blockDim.x, 1, 1);
hipBindTexture(0, siteLink0TexSingle_recon, siteLink.Even_p(), siteLink.Bytes()/2);
hipBindTexture(0, siteLink1TexSingle_recon, siteLink.Odd_p(), siteLink.Bytes()/2);
for(sig=0; sig < 8; sig++){
for(mu = 0; mu < 8; mu++){
if ( (mu == sig) || (mu == OPP_DIR(sig))){
continue;
}
//3-link
//Kernel A: middle link
middle_link_kernel( (float2*)cudaHw.even.data, (float2*)cudaHw.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)P3.even.data, (float2*)P3.odd.data,
sig, mu, mThreeSt,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
for(nu=0; nu < 8; nu++){
if (nu == sig || nu == OPP_DIR(sig)
|| nu == mu || nu == OPP_DIR(mu)){
continue;
}
//5-link: middle link
//Kernel B
middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P5.even.data, (float2*)P5.odd.data,
sig, nu, FiveSt,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
for(rho =0; rho < 8; rho++){
if (rho == sig || rho == OPP_DIR(sig)
|| rho == mu || rho == OPP_DIR(mu)
|| rho == nu || rho == OPP_DIR(nu)){
continue;
}
//7-link: middle link and side link
//kernel C
if(FiveSt.x != 0)coeff.x = SevenSt.x/FiveSt.x ; else coeff.x = 0;
if(FiveSt.y != 0)coeff.y = SevenSt.y/FiveSt.y ; else coeff.y = 0;
all_link_kernel((float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)Prhonumu.even.data, (float2*)Prhonumu.odd.data,
(float2*)P7.even.data, (float2*)P7.odd.data,
(float2*)P7rho.even.data, (float2*)P7rho.odd.data,
(float2*)P5.even.data, (float2*)P5.odd.data,
sig, rho, SevenSt,mSevenSt,coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
}//rho
//5-link: side link
//kernel B2
if(ThreeSt.x != 0)coeff.x = FiveSt.x/ThreeSt.x ; else coeff.x = 0;
if(ThreeSt.y != 0)coeff.y = FiveSt.y/ThreeSt.y ; else coeff.y = 0;
side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data,
(float2*)P5nu.even.data, (float2*)P5nu.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P3.even.data, (float2*)P3.odd.data,
sig, nu, mFiveSt, coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
}//nu
//lepage
//Kernel A2
middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P5.even.data, (float2*)P5.odd.data,
sig, mu, Lepage,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
if(ThreeSt.x != 0)coeff.x = Lepage.x/ThreeSt.x ; else coeff.x = 0;
if(ThreeSt.y != 0)coeff.y = Lepage.y/ThreeSt.y ; else coeff.y = 0;
side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data,
(float2*)P5nu.even.data, (float2*)P5nu.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P3.even.data, (float2*)P3.odd.data,
sig, mu, mLepage,coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
//3-link side link
coeff.x=coeff.y=0;
side_link_kernel((float2*)P3.even.data, (float2*)P3.odd.data,
(float2*)P3mu.even.data, (float2*)P3mu.odd.data,
(float2*)cudaHw.even.data, (float2*)cudaHw.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)NULL, (float2*)NULL,
sig, mu, ThreeSt,coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
//1-link and naik term
if (!DirectLinks[mu]){
DirectLinks[mu]=1;
//kernel Z
one_and_naik_terms_kernel((float2*)cudaHw.even.data, (float2*)cudaHw.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
mu, OneLink, Naik, mNaik,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(),
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
}
}//mu
}//sig
hipUnbindTexture(siteLink0TexSingle_recon);
hipUnbindTexture(siteLink1TexSingle_recon);
}
#undef Pmu
#undef Pnumu
#undef Prhonumu
#undef P7
#undef P7rho
#undef P7rhonu
#undef P5
#undef P3
#undef P5nu
#undef P3mu
#undef Popmu
#undef Pmumumu
void
fermion_force_cuda(double eps, double weight1, double weight2, void* act_path_coeff,
FullHw cudaHw, cudaGaugeField &siteLink, cudaGaugeField &cudaMom, QudaGaugeParam* param)
{
int i;
FullHw tempvec[8];
if (siteLink.Reconstruct() != QUDA_RECONSTRUCT_12)
errorQuda("Reconstruct type %d not supported for gauge field", siteLink.Reconstruct());
if (cudaMom.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Reconstruct type %d not supported for momentum field", cudaMom.Reconstruct());
for(i=0;i < 8;i++){
tempvec[i] = createHwQuda(param->X, param->cuda_prec);
}
if (param->cuda_prec == QUDA_DOUBLE_PRECISION){
/*
do_fermion_force_cuda( (double)eps, (double)weight1, (double)weight2, (double*)act_path_coeff,
cudaHw, siteLink, cudaMom, tempvec, param);
*/
errorQuda("Double precision not supported?");
}else{
do_fermion_force_cuda( (float)eps, (float)weight1, (float)weight2, (float*)act_path_coeff,
cudaHw, siteLink, cudaMom, tempvec, param);
}
for(i=0;i < 8;i++){
freeHwQuda(tempvec[i]);
}
}
#undef BLOCK_DIM
#undef FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE
#undef FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE
} // namespace quda
| 7c2d2e9d8af2f9c0e08e1b1cf24734659ad87357.cu |
#include <read_gauge.h>
#include <gauge_field.h>
#include <fermion_force_quda.h>
#include <force_common.h>
#include <hw_quda.h>
namespace quda {
#define BLOCK_DIM 64
#define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, Vh)
#define LOAD_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \
Float2* hw = (oddness)?hw_odd:hw_even; \
var##0 = hw[idx + 0*Vh]; \
var##1 = hw[idx + 1*Vh]; \
var##2 = hw[idx + 2*Vh]; \
var##3 = hw[idx + 3*Vh]; \
var##4 = hw[idx + 4*Vh]; \
var##5 = hw[idx + 5*Vh]; \
}while(0)
#define WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \
Float2* hw = (oddness)?hw_odd:hw_even; \
hw[idx + 0*Vh] = var##0; \
hw[idx + 1*Vh] = var##1; \
hw[idx + 2*Vh] = var##2; \
hw[idx + 3*Vh] = var##3; \
hw[idx + 4*Vh] = var##4; \
hw[idx + 5*Vh] = var##5; \
}while(0)
#define LOAD_HW(hw_eve, hw_odd, idx, var, oddness) LOAD_HW_SINGLE(hw_eve, hw_odd, idx, var, oddness)
#define WRITE_HW(hw_even, hw_odd, idx, var, oddness) WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness)
#define LOAD_MATRIX(src, dir, idx, var) LOAD_MATRIX_12_SINGLE(src, dir, idx, var, Vh)
#define FF_SITE_MATRIX_LOAD_TEX 1
#define linkEvenTex siteLink0TexSingle_recon
#define linkOddTex siteLink1TexSingle_recon
#if (FF_SITE_MATRIX_LOAD_TEX == 1)
#define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE_TEX(((oddness)?linkOddTex:linkEvenTex), dir, idx, var, Vh)
#else
#define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE(((oddness)?linkOdd:linkEven), dir, idx, var, Vh)
#endif
#define linka00_re LINKA0.x
#define linka00_im LINKA0.y
#define linka01_re LINKA0.z
#define linka01_im LINKA0.w
#define linka02_re LINKA1.x
#define linka02_im LINKA1.y
#define linka10_re LINKA1.z
#define linka10_im LINKA1.w
#define linka11_re LINKA2.x
#define linka11_im LINKA2.y
#define linka12_re LINKA2.z
#define linka12_im LINKA2.w
#define linka20_re LINKA3.x
#define linka20_im LINKA3.y
#define linka21_re LINKA3.z
#define linka21_im LINKA3.w
#define linka22_re LINKA4.x
#define linka22_im LINKA4.y
#define linkb00_re LINKB0.x
#define linkb00_im LINKB0.y
#define linkb01_re LINKB0.z
#define linkb01_im LINKB0.w
#define linkb02_re LINKB1.x
#define linkb02_im LINKB1.y
#define linkb10_re LINKB1.z
#define linkb10_im LINKB1.w
#define linkb11_re LINKB2.x
#define linkb11_im LINKB2.y
#define linkb12_re LINKB2.z
#define linkb12_im LINKB2.w
#define linkb20_re LINKB3.x
#define linkb20_im LINKB3.y
#define linkb21_re LINKB3.z
#define linkb21_im LINKB3.w
#define linkb22_re LINKB4.x
#define linkb22_im LINKB4.y
#define MAT_MUL_HW(M, HW, HWOUT) \
HWOUT##00_re = (M##00_re * HW##00_re - M##00_im * HW##00_im) \
+ (M##01_re * HW##01_re - M##01_im * HW##01_im) \
+ (M##02_re * HW##02_re - M##02_im * HW##02_im); \
HWOUT##00_im = (M##00_re * HW##00_im + M##00_im * HW##00_re) \
+ (M##01_re * HW##01_im + M##01_im * HW##01_re) \
+ (M##02_re * HW##02_im + M##02_im * HW##02_re); \
HWOUT##01_re = (M##10_re * HW##00_re - M##10_im * HW##00_im) \
+ (M##11_re * HW##01_re - M##11_im * HW##01_im) \
+ (M##12_re * HW##02_re - M##12_im * HW##02_im); \
HWOUT##01_im = (M##10_re * HW##00_im + M##10_im * HW##00_re) \
+ (M##11_re * HW##01_im + M##11_im * HW##01_re) \
+ (M##12_re * HW##02_im + M##12_im * HW##02_re); \
HWOUT##02_re = (M##20_re * HW##00_re - M##20_im * HW##00_im) \
+ (M##21_re * HW##01_re - M##21_im * HW##01_im) \
+ (M##22_re * HW##02_re - M##22_im * HW##02_im); \
HWOUT##02_im = (M##20_re * HW##00_im + M##20_im * HW##00_re) \
+ (M##21_re * HW##01_im + M##21_im * HW##01_re) \
+ (M##22_re * HW##02_im + M##22_im * HW##02_re); \
HWOUT##10_re = (M##00_re * HW##10_re - M##00_im * HW##10_im) \
+ (M##01_re * HW##11_re - M##01_im * HW##11_im) \
+ (M##02_re * HW##12_re - M##02_im * HW##12_im); \
HWOUT##10_im = (M##00_re * HW##10_im + M##00_im * HW##10_re) \
+ (M##01_re * HW##11_im + M##01_im * HW##11_re) \
+ (M##02_re * HW##12_im + M##02_im * HW##12_re); \
HWOUT##11_re = (M##10_re * HW##10_re - M##10_im * HW##10_im) \
+ (M##11_re * HW##11_re - M##11_im * HW##11_im) \
+ (M##12_re * HW##12_re - M##12_im * HW##12_im); \
HWOUT##11_im = (M##10_re * HW##10_im + M##10_im * HW##10_re) \
+ (M##11_re * HW##11_im + M##11_im * HW##11_re) \
+ (M##12_re * HW##12_im + M##12_im * HW##12_re); \
HWOUT##12_re = (M##20_re * HW##10_re - M##20_im * HW##10_im) \
+ (M##21_re * HW##11_re - M##21_im * HW##11_im) \
+ (M##22_re * HW##12_re - M##22_im * HW##12_im); \
HWOUT##12_im = (M##20_re * HW##10_im + M##20_im * HW##10_re) \
+ (M##21_re * HW##11_im + M##21_im * HW##11_re) \
+ (M##22_re * HW##12_im + M##22_im * HW##12_re);
#define ADJ_MAT_MUL_HW(M, HW, HWOUT) \
HWOUT##00_re = (M##00_re * HW##00_re + M##00_im * HW##00_im) \
+ (M##10_re * HW##01_re + M##10_im * HW##01_im) \
+ (M##20_re * HW##02_re + M##20_im * HW##02_im); \
HWOUT##00_im = (M##00_re * HW##00_im - M##00_im * HW##00_re) \
+ (M##10_re * HW##01_im - M##10_im * HW##01_re) \
+ (M##20_re * HW##02_im - M##20_im * HW##02_re); \
HWOUT##01_re = (M##01_re * HW##00_re + M##01_im * HW##00_im) \
+ (M##11_re * HW##01_re + M##11_im * HW##01_im) \
+ (M##21_re * HW##02_re + M##21_im * HW##02_im); \
HWOUT##01_im = (M##01_re * HW##00_im - M##01_im * HW##00_re) \
+ (M##11_re * HW##01_im - M##11_im * HW##01_re) \
+ (M##21_re * HW##02_im - M##21_im * HW##02_re); \
HWOUT##02_re = (M##02_re * HW##00_re + M##02_im * HW##00_im) \
+ (M##12_re * HW##01_re + M##12_im * HW##01_im) \
+ (M##22_re * HW##02_re + M##22_im * HW##02_im); \
HWOUT##02_im = (M##02_re * HW##00_im - M##02_im * HW##00_re) \
+ (M##12_re * HW##01_im - M##12_im * HW##01_re) \
+ (M##22_re * HW##02_im - M##22_im * HW##02_re); \
HWOUT##10_re = (M##00_re * HW##10_re + M##00_im * HW##10_im) \
+ (M##10_re * HW##11_re + M##10_im * HW##11_im) \
+ (M##20_re * HW##12_re + M##20_im * HW##12_im); \
HWOUT##10_im = (M##00_re * HW##10_im - M##00_im * HW##10_re) \
+ (M##10_re * HW##11_im - M##10_im * HW##11_re) \
+ (M##20_re * HW##12_im - M##20_im * HW##12_re); \
HWOUT##11_re = (M##01_re * HW##10_re + M##01_im * HW##10_im) \
+ (M##11_re * HW##11_re + M##11_im * HW##11_im) \
+ (M##21_re * HW##12_re + M##21_im * HW##12_im); \
HWOUT##11_im = (M##01_re * HW##10_im - M##01_im * HW##10_re) \
+ (M##11_re * HW##11_im - M##11_im * HW##11_re) \
+ (M##21_re * HW##12_im - M##21_im * HW##12_re); \
HWOUT##12_re = (M##02_re * HW##10_re + M##02_im * HW##10_im) \
+ (M##12_re * HW##11_re + M##12_im * HW##11_im) \
+ (M##22_re * HW##12_re + M##22_im * HW##12_im); \
HWOUT##12_im = (M##02_re * HW##10_im - M##02_im * HW##10_re) \
+ (M##12_re * HW##11_im - M##12_im * HW##11_re) \
+ (M##22_re * HW##12_im - M##22_im * HW##12_re);
#define SU3_PROJECTOR(va, vb, m) \
m##00_re = va##0_re * vb##0_re + va##0_im * vb##0_im; \
m##00_im = va##0_im * vb##0_re - va##0_re * vb##0_im; \
m##01_re = va##0_re * vb##1_re + va##0_im * vb##1_im; \
m##01_im = va##0_im * vb##1_re - va##0_re * vb##1_im; \
m##02_re = va##0_re * vb##2_re + va##0_im * vb##2_im; \
m##02_im = va##0_im * vb##2_re - va##0_re * vb##2_im; \
m##10_re = va##1_re * vb##0_re + va##1_im * vb##0_im; \
m##10_im = va##1_im * vb##0_re - va##1_re * vb##0_im; \
m##11_re = va##1_re * vb##1_re + va##1_im * vb##1_im; \
m##11_im = va##1_im * vb##1_re - va##1_re * vb##1_im; \
m##12_re = va##1_re * vb##2_re + va##1_im * vb##2_im; \
m##12_im = va##1_im * vb##2_re - va##1_re * vb##2_im; \
m##20_re = va##2_re * vb##0_re + va##2_im * vb##0_im; \
m##20_im = va##2_im * vb##0_re - va##2_re * vb##0_im; \
m##21_re = va##2_re * vb##1_re + va##2_im * vb##1_im; \
m##21_im = va##2_im * vb##1_re - va##2_re * vb##1_im; \
m##22_re = va##2_re * vb##2_re + va##2_im * vb##2_im; \
m##22_im = va##2_im * vb##2_re - va##2_re * vb##2_im;
//vc = va + vb*s
#define SCALAR_MULT_ADD_SU3_VECTOR(va, vb, s, vc) do { \
vc##0_re = va##0_re + vb##0_re * s; \
vc##0_im = va##0_im + vb##0_im * s; \
vc##1_re = va##1_re + vb##1_re * s; \
vc##1_im = va##1_im + vb##1_im * s; \
vc##2_re = va##2_re + vb##2_re * s; \
vc##2_im = va##2_im + vb##2_im * s; \
}while (0)
#define FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \
new_x1 = (new_x1==X1m1)?0:new_x1+1; \
break; \
case 1: \
new_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \
new_x2 = (new_x2==X2m1)?0:new_x2+1; \
break; \
case 2: \
new_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \
new_x3 = (new_x3==X3m1)?0:new_x3+1; \
break; \
case 3: \
new_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \
new_x4 = (new_x4==X4m1)?0:new_x4+1; \
break; \
} \
}while(0)
#define FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (new_x1==0)?idx+X1m1:idx-1); \
new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \
break; \
case 1: \
new_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \
new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \
break; \
case 2: \
new_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \
new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \
break; \
case 3: \
new_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \
new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \
break; \
} \
}while(0)
#define FF_COMPUTE_NEW_FULL_IDX_PLUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (old_x1==X1m1)?idx-X1m1:idx+1); \
break; \
case 1: \
new_idx = ( (old_x2==X2m1)?idx-X2X1mX1:idx+X1); \
break; \
case 2: \
new_idx = ( (old_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \
break; \
case 3: \
new_idx = ( (old_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \
break; \
} \
}while(0)
#define FF_COMPUTE_NEW_FULL_IDX_MINUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \
switch(mydir){ \
case 0: \
new_idx = ( (old_x1==0)?idx+X1m1:idx-1); \
break; \
case 1: \
new_idx = ( (old_x2==0)?idx+X2X1mX1:idx-X1); \
break; \
case 2: \
new_idx = ( (old_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \
break; \
case 3: \
new_idx = ( (old_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \
break; \
} \
}while(0)
//this macro requires the linka, linkb, and ah variables to be defined
#define ADD_FORCE_TO_MOM(hw1, hw2, idx, dir, cf,oddness) do{ \
Float2 my_coeff; \
int mydir; \
if (GOES_BACKWARDS(dir)){ \
mydir=OPP_DIR(dir); \
my_coeff.x = -cf.x; \
my_coeff.y = -cf.y; \
}else{ \
mydir=dir; \
my_coeff.x = cf.x; \
my_coeff.y = cf.y; \
} \
Float2 tmp_coeff; \
tmp_coeff.x = my_coeff.x; \
tmp_coeff.y = my_coeff.y; \
if(oddness){ \
tmp_coeff.x = - my_coeff.x; \
tmp_coeff.y = - my_coeff.y; \
} \
Float2* mom = oddness?momOdd:momEven; \
LOAD_ANTI_HERMITIAN(mom, mydir, idx, AH); \
UNCOMPRESS_ANTI_HERMITIAN(ah, linka); \
SU3_PROJECTOR(hw1##0, hw2##0, linkb); \
SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.x, linka); \
SU3_PROJECTOR(hw1##1, hw2##1, linkb); \
SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.y, linka); \
MAKE_ANTI_HERMITIAN(linka, ah); \
WRITE_ANTI_HERMITIAN(mom, mydir, idx, AH, Vh); \
}while(0)
#define FF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \
sign =1; \
switch(dir){ \
case XUP: \
if ( (i4 & 1) == 1){ \
sign = -1; \
} \
break; \
case YUP: \
if ( ((i4+i1) & 1) == 1){ \
sign = -1; \
} \
break; \
case ZUP: \
if ( ((i4+i1+i2) & 1) == 1){ \
sign = -1; \
} \
break; \
case TUP: \
if (i4 == X4m1 ){ \
sign = -1; \
} \
break; \
} \
}while (0)
#define hwa00_re HWA0.x
#define hwa00_im HWA0.y
#define hwa01_re HWA1.x
#define hwa01_im HWA1.y
#define hwa02_re HWA2.x
#define hwa02_im HWA2.y
#define hwa10_re HWA3.x
#define hwa10_im HWA3.y
#define hwa11_re HWA4.x
#define hwa11_im HWA4.y
#define hwa12_re HWA5.x
#define hwa12_im HWA5.y
#define hwb00_re HWB0.x
#define hwb00_im HWB0.y
#define hwb01_re HWB1.x
#define hwb01_im HWB1.y
#define hwb02_re HWB2.x
#define hwb02_im HWB2.y
#define hwb10_re HWB3.x
#define hwb10_im HWB3.y
#define hwb11_re HWB4.x
#define hwb11_im HWB4.y
#define hwb12_re HWB5.x
#define hwb12_im HWB5.y
#define hwc00_re HWC0.x
#define hwc00_im HWC0.y
#define hwc01_re HWC1.x
#define hwc01_im HWC1.y
#define hwc02_re HWC2.x
#define hwc02_im HWC2.y
#define hwc10_re HWC3.x
#define hwc10_im HWC3.y
#define hwc11_re HWC4.x
#define hwc11_im HWC4.y
#define hwc12_re HWC5.x
#define hwc12_im HWC5.y
#define hwd00_re HWD0.x
#define hwd00_im HWD0.y
#define hwd01_re HWD1.x
#define hwd01_im HWD1.y
#define hwd02_re HWD2.x
#define hwd02_im HWD2.y
#define hwd10_re HWD3.x
#define hwd10_im HWD3.y
#define hwd11_re HWD4.x
#define hwd11_im HWD4.y
#define hwd12_re HWD5.x
#define hwd12_im HWD5.y
#define hwe00_re HWE0.x
#define hwe00_im HWE0.y
#define hwe01_re HWE1.x
#define hwe01_im HWE1.y
#define hwe02_re HWE2.x
#define hwe02_im HWE2.y
#define hwe10_re HWE3.x
#define hwe10_im HWE3.y
#define hwe11_re HWE4.x
#define hwe11_im HWE4.y
#define hwe12_re HWE5.x
#define hwe12_im HWE5.y
void fermion_force_init_cuda(QudaGaugeParam* param)
{
#ifdef MULTI_GPU
#error "multi gpu is not supported for fermion force computation"
#endif
static int fermion_force_init_cuda_flag = 0;
if (fermion_force_init_cuda_flag) return;
fermion_force_init_cuda_flag=1;
}
/*
* This function computes the contribution to momentum from the middle link in a staple
*
* tempx: IN
* Pmu: OUT
* P3: OUT
*
*/
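/*
 * A rough sketch of what the sig-forward, mu-forward instantiation below computes,
 * using the site labels from the diagram inside the kernel (D = A - mu, B = A + sig,
 * C = D + sig); this is an illustrative reading of the kernel body, with the site
 * links written as U:
 *
 *   Pmu(A) = U_mu(D)^dagger * tempx(D)
 *   P3(A)  = U_sig(A) * U_mu(C)^dagger * tempx(C)
 *   mom(A, sig) += traceless anti-hermitian part of the outer product of
 *                  P3(A) with Pmu(A)^dagger, weighted per component by coeff
 */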
template<int sig_positive, int mu_positive, int oddBit, typename Float2>
__global__ void
do_middle_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
int sig, int mu, Float2 coeff,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int new_x1, new_x2, new_x3, new_x4;
int new_mem_idx;
int ad_link_sign=1;
int ab_link_sign=1;
int bc_link_sign=1;
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
Float2 AH0, AH1, AH2, AH3, AH4;
/* sig
* A________B
* mu | |
* D | |C
*
* A is the current point (sid)
*/
int point_b, point_c, point_d;
int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx;
int mymu;
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(mu_positive){
mymu =mu;
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx);
}else{
mymu = OPP_DIR(mu);
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx);
}
point_d = (new_mem_idx >> 1);
if (mu_positive){
ad_link_nbr_idx = point_d;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}else{
ad_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4);
}
int mysig;
if(sig_positive){
mysig = sig;
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx);
}else{
mysig = OPP_DIR(sig);
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx);
}
point_c = (new_mem_idx >> 1);
if (mu_positive){
bc_link_nbr_idx = point_c;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(sig_positive){
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx);
}else{
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx);
}
point_b = (new_mem_idx >> 1);
if (!mu_positive){
bc_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
if(sig_positive){
ab_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4);
}else{
ab_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4);
}
LOAD_HW(tempxEven, tempxOdd, point_d, HWA, 1-oddBit );
if(mu_positive){
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1-oddBit);
}else{
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit);
}
RECONSTRUCT_LINK_12(ad_link_sign, linka);
if (mu_positive){
ADJ_MAT_MUL_HW(linka, hwa, hwd);
}else{
MAT_MUL_HW(linka, hwa, hwd);
}
WRITE_HW(PmuEven,PmuOdd, sid, HWD, oddBit);
LOAD_HW(tempxEven,tempxOdd, point_c, HWA, oddBit);
if(mu_positive){
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit);
}else{
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit);
}
RECONSTRUCT_LINK_12(bc_link_sign, linka);
if (mu_positive){
ADJ_MAT_MUL_HW(linka, hwa, hwb);
}else{
MAT_MUL_HW(linka, hwa, hwb);
}
if(sig_positive){
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, oddBit);
}else{
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, 1-oddBit);
}
RECONSTRUCT_LINK_12(ab_link_sign, linkb);
if (sig_positive){
MAT_MUL_HW(linkb, hwb, hwc);
}else{
ADJ_MAT_MUL_HW(linkb, hwb, hwc);
}
WRITE_HW(P3Even, P3Odd, sid, HWC, oddBit);
if (sig_positive){
//add the force to mom
ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, coeff, oddBit);
}
}
template<typename Float2>
static void
middle_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
int sig, int mu, Float2 coeff,
float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 BlockDim)
{
dim3 halfGridDim(gridDim.x/2, 1,1);
#define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \
do_middle_link_kernel<sig_sign, mu_sign,0><<<halfGridDim, BlockDim>>>( tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
sig, mu, coeff, \
linkEven, linkOdd, \
momEven, momOdd); \
do_middle_link_kernel<sig_sign, mu_sign, 1><<<halfGridDim, BlockDim>>>(tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
sig, mu, coeff, \
linkEven, linkOdd, \
momEven, momOdd);
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1, 1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1, 0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(0, 1);
}else{
CALL_MIDDLE_LINK_KERNEL(0, 0);
}
#undef CALL_MIDDLE_LINK_KERNEL
}
/*
* Computes the contribution to momentum from the side links in a staple
*
* P3: IN
* P3mu: not used
* Tempx: IN
* Pmu: IN
* shortP: OUT
*
*/
template<int sig_positive, int mu_positive, int oddBit, typename Float2>
__global__ void
do_side_link_kernel(Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
Float2 mcoeff;
mcoeff.x = -coeff.x;
mcoeff.y = -coeff.y;
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int ad_link_sign = 1;
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
Float2 AH0, AH1, AH2, AH3, AH4;
/*
* compute the side link contribution to the momentum
*
*
* sig
* A________B
* | | mu
* D | |C
*
* A is the current point (sid)
*/
int point_d;
int ad_link_nbr_idx;
int mymu;
int new_mem_idx;
int new_x1 = x1;
int new_x2 = x2;
int new_x3 = x3;
int new_x4 = x4;
if(mu_positive){
mymu =mu;
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mymu,X, new_mem_idx);
}else{
mymu = OPP_DIR(mu);
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mymu, X, new_mem_idx);
}
point_d = (new_mem_idx >> 1);
if (mu_positive){
ad_link_nbr_idx = point_d;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}else{
ad_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4);
}
LOAD_HW(P3Even, P3Odd, sid, HWA, oddBit);
if(mu_positive){
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1 - oddBit);
}else{
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit);
}
RECONSTRUCT_LINK_12(ad_link_sign, linka);
if (mu_positive){
MAT_MUL_HW(linka, hwa, hwb);
}else{
ADJ_MAT_MUL_HW(linka, hwa, hwb);
}
//start to add side link force
if (mu_positive){
LOAD_HW(TempxEven, TempxOdd, point_d, HWC, 1-oddBit);
if (sig_positive){
ADD_FORCE_TO_MOM(hwb, hwc, point_d, mu, coeff, 1-oddBit);
}else{
ADD_FORCE_TO_MOM(hwc, hwb, point_d, OPP_DIR(mu), mcoeff, 1- oddBit);
}
}else{
LOAD_HW(PmuEven, PmuOdd, sid, HWC, oddBit);
if (sig_positive){
ADD_FORCE_TO_MOM(hwa, hwc, sid, mu, mcoeff, oddBit);
}else{
ADD_FORCE_TO_MOM(hwc, hwa, sid, OPP_DIR(mu), coeff, oddBit);
}
}
if (shortPOdd){
LOAD_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit);
SCALAR_MULT_ADD_SU3_VECTOR(hwa0, hwb0, accumu_coeff.x, hwa0);
SCALAR_MULT_ADD_SU3_VECTOR(hwa1, hwb1, accumu_coeff.y, hwa1);
WRITE_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit);
}
}
template<typename Float2>
static void
side_link_kernel(Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 blockDim)
{
dim3 halfGridDim(gridDim.x/2,1,1);
#define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \
do_side_link_kernel<sig_sign,mu_sign,0><<<halfGridDim, blockDim>>>( P3Even, P3Odd, \
P3muEven, P3muOdd, \
TempxEven, TempxOdd, \
PmuEven, PmuOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd); \
do_side_link_kernel<sig_sign,mu_sign,1><<<halfGridDim, blockDim>>>( P3Even, P3Odd, \
P3muEven, P3muOdd, \
TempxEven, TempxOdd, \
PmuEven, PmuOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd);
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(0,1);
}else{
CALL_SIDE_LINK_KERNEL(0,0);
}
#undef CALL_SIDE_LINK_KERNEL
}
/*
* This function computes the contribution to momentum from middle and side links
*
* tempx: IN
* Pmu: not used
* P3: not used
* P3mu: not used
* shortP: OUT
*
*/
template<int sig_positive, int mu_positive, int oddBit, typename Float2>
__global__ void
do_all_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int new_x1, new_x2, new_x3, new_x4;
int ad_link_sign=1;
int ab_link_sign=1;
int bc_link_sign=1;
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5;
Float2 HWE0, HWE1, HWE2, HWE3, HWE4, HWE5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
float4 LINKC0, LINKC1, LINKC2, LINKC3, LINKC4;
Float2 AH0, AH1, AH2, AH3, AH4;
/* sig
* A________B
* mu | |
* D | |C
*
* A is the current point (sid)
*/
int point_b, point_c, point_d;
int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx;
int mymu;
int new_mem_idx;
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(mu_positive){
mymu =mu;
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx);
}else{
mymu = OPP_DIR(mu);
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx);
}
point_d = (new_mem_idx >> 1);
if (mu_positive){
ad_link_nbr_idx = point_d;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}else{
ad_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4);
}
int mysig;
if(sig_positive){
mysig = sig;
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx);
}else{
mysig = OPP_DIR(sig);
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx);
}
point_c = (new_mem_idx >> 1);
if (mu_positive){
bc_link_nbr_idx = point_c;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
new_x1 = x1;
new_x2 = x2;
new_x3 = x3;
new_x4 = x4;
if(sig_positive){
FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx);
}else{
FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx);
}
point_b = (new_mem_idx >> 1);
if (!mu_positive){
bc_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4);
}
if(sig_positive){
ab_link_nbr_idx = sid;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4);
}else{
ab_link_nbr_idx = point_b;
FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4);
}
LOAD_HW(tempxEven, tempxOdd, point_d, HWE, 1-oddBit);
if (mu_positive){
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, 1-oddBit);
}else{
FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, oddBit);
}
RECONSTRUCT_LINK_12(ad_link_sign, linkc);
if (mu_positive){
ADJ_MAT_MUL_HW(linkc, hwe, hwd);
}else{
MAT_MUL_HW(linkc, hwe, hwd);
}
//we do not need to write Pmu here
//WRITE_HW(myPmu, sid, HWD);
LOAD_HW(tempxEven, tempxOdd, point_c, HWA, oddBit);
if (mu_positive){
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit);
}else{
FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit);
}
RECONSTRUCT_LINK_12(bc_link_sign, linka);
if (mu_positive){
ADJ_MAT_MUL_HW(linka, hwa, hwb);
}else{
MAT_MUL_HW(linka, hwa, hwb);
}
if (sig_positive){
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, oddBit);
}else{
FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, 1-oddBit);
}
RECONSTRUCT_LINK_12(ab_link_sign, linka);
if (sig_positive){
MAT_MUL_HW(linka, hwb, hwc);
}else{
ADJ_MAT_MUL_HW(linka, hwb, hwc);
}
//we do not need to write P3 here
//WRITE_HW(myP3, sid, HWC);
//The middle link contribution
if (sig_positive){
//add the force to mom
ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, mcoeff, oddBit);
}
//P3 is hwc
//ad_link is linkc
if (mu_positive){
MAT_MUL_HW(linkc, hwc, hwa);
}else{
ADJ_MAT_MUL_HW(linkc, hwc, hwa);
}
//accumulate P7rho to P5
//WRITE_HW(otherP3mu, point_d, HWA);
LOAD_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit);
SCALAR_MULT_ADD_SU3_VECTOR(hwb0, hwa0, accumu_coeff.x, hwb0);
SCALAR_MULT_ADD_SU3_VECTOR(hwb1, hwa1, accumu_coeff.y, hwb1);
WRITE_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit);
//hwe holds tempx at point_d
//hwd holds Pmu at point A(sid)
if (mu_positive){
if (sig_positive){
ADD_FORCE_TO_MOM(hwa, hwe, point_d, mu, coeff, 1-oddBit);
}else{
ADD_FORCE_TO_MOM(hwe, hwa, point_d, OPP_DIR(mu), mcoeff, 1- oddBit);
}
}else{
if (sig_positive){
ADD_FORCE_TO_MOM(hwc, hwd, sid, mu, mcoeff, oddBit);
}else{
ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), coeff, oddBit);
}
}
}
template<typename Float2>
static void
all_link_kernel(Float2* tempxEven, Float2* tempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* P3Even, Float2* P3Odd,
Float2* P3muEven, Float2* P3muOdd,
Float2* shortPEven, Float2* shortPOdd,
int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff,
float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 blockDim)
{
dim3 halfGridDim(gridDim.x/2, 1,1);
#define CALL_ALL_LINK_KERNEL(sig_sign, mu_sign) \
do_all_link_kernel<sig_sign,mu_sign,0><<<halfGridDim, blockDim>>>(tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
P3muEven, P3muOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, mcoeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd); \
do_all_link_kernel<sig_sign,mu_sign,1><<<halfGridDim, blockDim>>>(tempxEven, tempxOdd, \
PmuEven, PmuOdd, \
P3Even, P3Odd, \
P3muEven, P3muOdd, \
shortPEven, shortPOdd, \
sig, mu, coeff, mcoeff, accumu_coeff, \
linkEven, linkOdd, \
momEven, momOdd);
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_ALL_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(0,1);
}else{
CALL_ALL_LINK_KERNEL(0,0);
}
#undef CALL_ALL_LINK_KERNEL
}
/* This function computes the one-link and Naik terms' contribution to momentum
*
* Tempx: IN
* Pmu: IN
* Pnumu: IN
*
*/
template <int oddBit, typename Float2>
__global__ void
do_one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* PnumuEven, Float2* PnumuOdd,
int mu, Float2 OneLink, Float2 Naik, Float2 mNaik,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd)
{
Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5;
Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5;
Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5;
Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5;
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
Float2 AH0, AH1, AH2, AH3, AH4;
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = sid / X1h;
int x1h = sid - z1*X1h;
int z2 = z1 / X2;
int x2 = z1 - z2*X2;
int x4 = z2 / X3;
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
//int X = 2*sid + x1odd;
int dx[4];
int new_x1, new_x2, new_x3, new_x4, new_idx;
int sign=1;
if (GOES_BACKWARDS(mu)){
//The one link
LOAD_HW(PmuEven, PmuOdd, sid, HWA, oddBit);
LOAD_HW(TempxEven, TempxOdd, sid, HWB, oddBit);
ADD_FORCE_TO_MOM(hwa, hwb, sid, OPP_DIR(mu), OneLink, oddBit);
//Naik term
dx[3]=dx[2]=dx[1]=dx[0]=0;
dx[OPP_DIR(mu)] = -1;
new_x1 = (x1 + dx[0] + X1)%X1;
new_x2 = (x2 + dx[1] + X2)%X2;
new_x3 = (x3 + dx[2] + X3)%X3;
new_x4 = (x4 + dx[3] + X4)%X4;
new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1;
LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit);
FF_LOAD_MATRIX(OPP_DIR(mu), new_idx, LINKA, 1-oddBit);
FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), new_x1,new_x2,new_x3,new_x4);
RECONSTRUCT_LINK_12(sign, linka);
ADJ_MAT_MUL_HW(linka, hwa, hwc); //Popmu
LOAD_HW(PnumuEven, PnumuOdd, sid, HWD, oddBit);
ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), mNaik, oddBit);
dx[3]=dx[2]=dx[1]=dx[0]=0;
dx[OPP_DIR(mu)] = 1;
new_x1 = (x1 + dx[0] + X1)%X1;
new_x2 = (x2 + dx[1] + X2)%X2;
new_x3 = (x3 + dx[2] + X3)%X3;
new_x4 = (x4 + dx[3] + X4)%X4;
new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1;
LOAD_HW(PnumuEven, PnumuOdd, new_idx, HWA, 1-oddBit);
FF_LOAD_MATRIX(OPP_DIR(mu), sid, LINKA, oddBit);
FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), x1, x2, x3, x4);
RECONSTRUCT_LINK_12(sign, linka);
MAT_MUL_HW(linka, hwa, hwc);
ADD_FORCE_TO_MOM(hwc, hwb, sid, OPP_DIR(mu), Naik, oddBit);
}else{
dx[3]=dx[2]=dx[1]=dx[0]=0;
dx[mu] = 1;
new_x1 = (x1 + dx[0] + X1)%X1;
new_x2 = (x2 + dx[1] + X2)%X2;
new_x3 = (x3 + dx[2] + X3)%X3;
new_x4 = (x4 + dx[3] + X4)%X4;
new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1;
LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit);
FF_LOAD_MATRIX(mu, sid, LINKA, oddBit);
FF_COMPUTE_RECONSTRUCT_SIGN(sign, mu, x1, x2, x3, x4);
RECONSTRUCT_LINK_12(sign, linka);
MAT_MUL_HW(linka, hwa, hwb);
LOAD_HW(PnumuEven, PnumuOdd, sid, HWC, oddBit);
ADD_FORCE_TO_MOM(hwb, hwc, sid, mu, Naik, oddBit);
}
}
template<typename Float2>
static void
one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd,
Float2* PmuEven, Float2* PmuOdd,
Float2* PnumuEven, Float2* PnumuOdd,
int mu, Float2 OneLink, Float2 Naik, Float2 mNaik,
float4* linkEven, float4* linkOdd,
Float2* momEven, Float2* momOdd,
dim3 gridDim, dim3 blockDim)
{
dim3 halfGridDim(gridDim.x/2, 1,1);
do_one_and_naik_terms_kernel<0><<<halfGridDim, blockDim>>>(TempxEven, TempxOdd,
PmuEven, PmuOdd,
PnumuEven, PnumuOdd,
mu, OneLink, Naik, mNaik,
linkEven, linkOdd,
momEven, momOdd);
do_one_and_naik_terms_kernel<1><<<halfGridDim, blockDim>>>(TempxEven, TempxOdd,
PmuEven, PmuOdd,
PnumuEven, PnumuOdd,
mu, OneLink, Naik, mNaik,
linkEven, linkOdd,
momEven, momOdd);
return;
}
#define Pmu tempvec[0]
#define Pnumu tempvec[1]
#define Prhonumu tempvec[2]
#define P7 tempvec[3]
#define P7rho tempvec[4]
#define P7rhonu tempvec[5]
#define P5 tempvec[6]
#define P3 tempvec[7]
#define P5nu tempvec[3]
#define P3mu tempvec[3]
#define Popmu tempvec[4]
#define Pmumumu tempvec[4]
template<typename Real>
static void
do_fermion_force_cuda(Real eps, Real weight1, Real weight2, Real* act_path_coeff, FullHw cudaHw,
cudaGaugeField &siteLink, cudaGaugeField &cudaMom, FullHw tempvec[8], QudaGaugeParam* param)
{
int mu, nu, rho, sig;
float2 coeff;
float2 OneLink, Lepage, Naik, FiveSt, ThreeSt, SevenSt;
float2 mNaik, mLepage, mFiveSt, mThreeSt, mSevenSt;
Real ferm_epsilon;
ferm_epsilon = 2.0*weight1*eps;
OneLink.x = act_path_coeff[0]*ferm_epsilon ;
Naik.x = act_path_coeff[1]*ferm_epsilon ; mNaik.x = -Naik.x;
ThreeSt.x = act_path_coeff[2]*ferm_epsilon ; mThreeSt.x = -ThreeSt.x;
FiveSt.x = act_path_coeff[3]*ferm_epsilon ; mFiveSt.x = -FiveSt.x;
SevenSt.x = act_path_coeff[4]*ferm_epsilon ; mSevenSt.x = -SevenSt.x;
Lepage.x = act_path_coeff[5]*ferm_epsilon ; mLepage.x = -Lepage.x;
ferm_epsilon = 2.0*weight2*eps;
OneLink.y = act_path_coeff[0]*ferm_epsilon ;
Naik.y = act_path_coeff[1]*ferm_epsilon ; mNaik.y = -Naik.y;
ThreeSt.y = act_path_coeff[2]*ferm_epsilon ; mThreeSt.y = -ThreeSt.y;
FiveSt.y = act_path_coeff[3]*ferm_epsilon ; mFiveSt.y = -FiveSt.y;
SevenSt.y = act_path_coeff[4]*ferm_epsilon ; mSevenSt.y = -SevenSt.y;
Lepage.y = act_path_coeff[5]*ferm_epsilon ; mLepage.y = -Lepage.y;
int DirectLinks[8] ;
for(mu=0;mu<8;mu++){
DirectLinks[mu] = 0 ;
}
int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3];
dim3 blockDim(BLOCK_DIM,1,1);
dim3 gridDim(volume/blockDim.x, 1, 1);
cudaBindTexture(0, siteLink0TexSingle_recon, siteLink.Even_p(), siteLink.Bytes()/2);
cudaBindTexture(0, siteLink1TexSingle_recon, siteLink.Odd_p(), siteLink.Bytes()/2);
for(sig=0; sig < 8; sig++){
for(mu = 0; mu < 8; mu++){
if ( (mu == sig) || (mu == OPP_DIR(sig))){
continue;
}
//3-link
//Kernel A: middle link
middle_link_kernel( (float2*)cudaHw.even.data, (float2*)cudaHw.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)P3.even.data, (float2*)P3.odd.data,
sig, mu, mThreeSt,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
for(nu=0; nu < 8; nu++){
if (nu == sig || nu == OPP_DIR(sig)
|| nu == mu || nu == OPP_DIR(mu)){
continue;
}
//5-link: middle link
//Kernel B
middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P5.even.data, (float2*)P5.odd.data,
sig, nu, FiveSt,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
for(rho =0; rho < 8; rho++){
if (rho == sig || rho == OPP_DIR(sig)
|| rho == mu || rho == OPP_DIR(mu)
|| rho == nu || rho == OPP_DIR(nu)){
continue;
}
//7-link: middle link and side link
//kernel C
if(FiveSt.x != 0)coeff.x = SevenSt.x/FiveSt.x ; else coeff.x = 0;
if(FiveSt.y != 0)coeff.y = SevenSt.y/FiveSt.y ; else coeff.y = 0;
all_link_kernel((float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)Prhonumu.even.data, (float2*)Prhonumu.odd.data,
(float2*)P7.even.data, (float2*)P7.odd.data,
(float2*)P7rho.even.data, (float2*)P7rho.odd.data,
(float2*)P5.even.data, (float2*)P5.odd.data,
sig, rho, SevenSt,mSevenSt,coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
}//rho
//5-link: side link
//kernel B2
if(ThreeSt.x != 0)coeff.x = FiveSt.x/ThreeSt.x ; else coeff.x = 0;
if(ThreeSt.y != 0)coeff.y = FiveSt.y/ThreeSt.y ; else coeff.y = 0;
side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data,
(float2*)P5nu.even.data, (float2*)P5nu.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P3.even.data, (float2*)P3.odd.data,
sig, nu, mFiveSt, coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
}//nu
//lepage
//Kernel A2
middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P5.even.data, (float2*)P5.odd.data,
sig, mu, Lepage,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
if(ThreeSt.x != 0)coeff.x = Lepage.x/ThreeSt.x ; else coeff.x = 0;
if(ThreeSt.y != 0)coeff.y = Lepage.y/ThreeSt.y ; else coeff.y = 0;
side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data,
(float2*)P5nu.even.data, (float2*)P5nu.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
(float2*)P3.even.data, (float2*)P3.odd.data,
sig, mu, mLepage,coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
//3-link side link
coeff.x=coeff.y=0;
side_link_kernel((float2*)P3.even.data, (float2*)P3.odd.data,
(float2*)P3mu.even.data, (float2*)P3mu.odd.data,
(float2*)cudaHw.even.data, (float2*)cudaHw.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)NULL, (float2*)NULL,
sig, mu, ThreeSt,coeff,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink,
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
//1-link and naik term
if (!DirectLinks[mu]){
DirectLinks[mu]=1;
//kernel Z
one_and_naik_terms_kernel((float2*)cudaHw.even.data, (float2*)cudaHw.odd.data,
(float2*)Pmu.even.data, (float2*)Pmu.odd.data,
(float2*)Pnumu.even.data, (float2*)Pnumu.odd.data,
mu, OneLink, Naik, mNaik,
(float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(),
(float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(),
gridDim, blockDim);
checkCudaError();
}
}//mu
}//sig
cudaUnbindTexture(siteLink0TexSingle_recon);
cudaUnbindTexture(siteLink1TexSingle_recon);
}
#undef Pmu
#undef Pnumu
#undef Prhonumu
#undef P7
#undef P7rho
#undef P7rhonu
#undef P5
#undef P3
#undef P5nu
#undef P3mu
#undef Popmu
#undef Pmumumu
void
fermion_force_cuda(double eps, double weight1, double weight2, void* act_path_coeff,
FullHw cudaHw, cudaGaugeField &siteLink, cudaGaugeField &cudaMom, QudaGaugeParam* param)
{
int i;
FullHw tempvec[8];
if (siteLink.Reconstruct() != QUDA_RECONSTRUCT_12)
errorQuda("Reconstruct type %d not supported for gauge field", siteLink.Reconstruct());
if (cudaMom.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Reconstruct type %d not supported for momentum field", cudaMom.Reconstruct());
for(i=0;i < 8;i++){
tempvec[i] = createHwQuda(param->X, param->cuda_prec);
}
if (param->cuda_prec == QUDA_DOUBLE_PRECISION){
/*
do_fermion_force_cuda( (double)eps, (double)weight1, (double)weight2, (double*)act_path_coeff,
cudaHw, siteLink, cudaMom, tempvec, param);
*/
errorQuda("Double precision not supported?");
}else{
do_fermion_force_cuda( (float)eps, (float)weight1, (float)weight2, (float*)act_path_coeff,
cudaHw, siteLink, cudaMom, tempvec, param);
}
for(i=0;i < 8;i++){
freeHwQuda(tempvec[i]);
}
}
#undef BLOCK_DIM
#undef FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE
#undef FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE
} // namespace quda
|
3a1ec8f901c8b37f55b815c780bee433633f908f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgeaxpy_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is magma_z_matrix. It can handle both
dense matrices (vector blocks) and CSR matrices. For the latter,
it interfaces with the cuSPARSE library.
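
A minimal usage sketch (illustrative only; X and Y are assumed to be dense
magma_z_matrix objects already resident on the device, and queue a valid
magma_queue_t):

    magmaDoubleComplex two = MAGMA_Z_MAKE( 2.0, 0.0 );
    magma_zgeaxpy( two, X, MAGMA_Z_ONE, &Y, queue );    // Y <- 2*X + Y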
Arguments
---------
@param[in]
alpha magmaDoubleComplex
scalar multiplier.
@param[in]
X magma_z_matrix
input matrix X.
@param[in]
beta magmaDoubleComplex
scalar multiplier.
@param[in,out]
Y magma_z_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgeaxpy(
magmaDoubleComplex alpha,
magma_z_matrix X,
magmaDoubleComplex beta,
magma_z_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_z_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_zcuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_zmfree( Y, queue );
magma_zmtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_zmfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
| 3a1ec8f901c8b37f55b815c780bee433633f908f.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgeaxpy_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is magma_z_matrix. It can handle both
dense matrices (vector blocks) and CSR matrices. For the latter,
it interfaces with the cuSPARSE library.
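
A minimal usage sketch (illustrative only; X and Y are assumed to be dense
magma_z_matrix objects already resident on the device, and queue a valid
magma_queue_t):

    magmaDoubleComplex two = MAGMA_Z_MAKE( 2.0, 0.0 );
    magma_zgeaxpy( two, X, MAGMA_Z_ONE, &Y, queue );    // Y <- 2*X + Y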
Arguments
---------
@param[in]
alpha magmaDoubleComplex
scalar multiplier.
@param[in]
X magma_z_matrix
input matrix X.
@param[in]
beta magmaDoubleComplex
scalar multiplier.
@param[in,out]
Y magma_z_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgeaxpy(
magmaDoubleComplex alpha,
magma_z_matrix X,
magmaDoubleComplex beta,
magma_z_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_z_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_zcuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_zmfree( Y, queue );
magma_zmtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_zmfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
|
cc7466ce913d9e854f8f9598fd5be5261fb91757.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// STD
#include <cassert>
#include <iostream>
// CUDA
#include <hip/hip_cooperative_groups.h>
// NNCPP
#include "tensor.hpp"
#include "activations.hpp"
#include "cuda_tensor_wrapper.cuh"
#include "cuda_utils.cuh"
namespace cg = cooperative_groups;
namespace nncpp
{
__global__ void kerner_relu(CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_relu_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_sigmoid(CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_sigmoid_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_softmax(CUDATensorWrapper input, size_t dim, CUDATensorWrapper output, CUDATensorWrapper buffer);
void _elementwise_activation_inplace(Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper));
Tensor _elementwise_activation(const Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper));
Tensor _elementwise_activation_backward(const Tensor & grad, const Tensor & input,
void kernel_func(CUDATensorWrapper, CUDATensorWrapper, CUDATensorWrapper));
void relu_(Tensor & input)
{
_elementwise_activation_inplace(input, kerner_relu);
}
Tensor relu(const Tensor & input)
{
return _elementwise_activation(input, kerner_relu);
}
void sigmoid_(Tensor & input)
{
_elementwise_activation_inplace(input, kerner_sigmoid);
}
Tensor sigmoid(const Tensor & input)
{
return _elementwise_activation(input, kerner_sigmoid);
}
void softmax_(Tensor & input, size_t dim)
{
assert(dim < 4);
assert(input.device == Device::CUDA);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper tw(input);
size_t shape[]{input.shape[0], input.shape[1], input.shape[2], input.shape[3]};
shape[dim] = 1;
auto buffer = Tensor::zeros(shape[0], shape[1], shape[2], shape[3], Device::CUDA);
CUDATensorWrapper buffertw(buffer);
hipLaunchKernelGGL(( kerner_softmax), dim3(grid_size), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, tw, dim, tw, buffertw);
CHECK(hipGetLastError());
}
Tensor softmax(const Tensor & input, size_t dim)
{
assert(dim < 4);
assert(input.device == Device::CUDA);
Tensor output = Tensor::zeros_like(input);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper itw(input);
CUDATensorWrapper otw(output);
size_t shape[]{input.shape[0], input.shape[1], input.shape[2], input.shape[3]};
shape[dim] = 1;
auto buffer = Tensor::zeros(shape[0], shape[1], shape[2], shape[3], Device::CUDA);
CUDATensorWrapper buffertw(buffer);
hipLaunchKernelGGL(( kerner_softmax), dim3(grid_size), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, itw, dim, otw, buffertw);
CHECK(hipGetLastError());
return std::move(output);
}
Tensor ReLU::forward(const Tensor & t)
{
_context.clear();
_context.push_back(t);
return relu(t);
}
Tensor ReLU::backward(const Tensor & grad)
{
assert(!_context.empty());
auto input = _context[0];
return _elementwise_activation_backward(grad, input, kerner_relu_backward);
}
Tensor Sigmoid::forward(const Tensor & t)
{
_context.clear();
_context.push_back(t);
return sigmoid(t);
}
Tensor Sigmoid::backward(const Tensor & grad)
{
assert(!_context.empty());
auto input = _context[0];
return _elementwise_activation_backward(grad, input, kerner_sigmoid_backward);
}
__global__ void kerner_relu(CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
if (input.at(i) < 0.0f)
{
output.at(i) = 0.0f;
}
else if (input.const_data() != output.data())
{ // copy data if output is not input
output.at(i) = input.at(i);
}
}
}
__global__ void kerner_relu_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
float value = grad.at(i);
if (input.at(i) < 0.0f)
{
value = 0.0f;
}
output.at(i) = value;
}
}
__global__ void kerner_sigmoid(CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
float exp_i = expf(input.at(i));
output.at(i) = exp_i / (1.0f + exp_i);
}
}
__global__ void kerner_sigmoid_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
float value = input.at(i);
output.at(i) = -1.0f * expf(value) * expm1f(value) * grad.at(i);
}
}
__global__ void kerner_softmax(CUDATensorWrapper input, size_t dim, CUDATensorWrapper output, CUDATensorWrapper buffer)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// softmax(input, dim) = exp(input - max(input)) / sum(exp(input - max(input)), dim)
// a) compute max(input)
_kernel_reduce_op(input, buffer, op_max, nullptr, _atomicMax);
cg::sync(cta);
// b.1) compute output = exp(input - max(input))
float max_input = buffer.at(0);
uint gridSize = blockDim.x * gridDim.x;
{
size_t outputNumel = output.numel();
uint i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < outputNumel)
{
output.at(i) = expf(input.at(i) - max_input);
i += gridSize;
}
}
cg::sync(cta);
// b.2) compute buffer = sum(output, dim)
// initialize buffer to zero:
buffer.at(0) = 0.0f;
_kernel_reduce_op_on_dim(output, dim, buffer, op_sum);
cg::sync(cta);
// c) compute output / buffer
// output.shape=(N,C,H,W) and buffer.shape=(X,Y,Z)
{
size_t bufferNumel = buffer.numel();
uint i = blockIdx.x * blockDim.x + threadIdx.x;
size_t size = output.shape[dim];
float denom;
size_t bufIndices[4], outIndex;
while (i < bufferNumel)
{
denom = buffer.at(i);
buffer.convert_from_linear(i, bufIndices);
for (size_t j = 0; j < size; j++)
{
bufIndices[dim] = j;
outIndex = output.convert_to_linear(bufIndices);
output.at(outIndex) = output.at(outIndex) / denom;
}
i += gridSize;
}
}
cg::sync(cta);
}
void _elementwise_activation_inplace(Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper))
{
assert(input.device == Device::CUDA);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper tw(input);
hipLaunchKernelGGL(( kernel_func), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, tw, tw);
CHECK(hipGetLastError());
}
Tensor _elementwise_activation(const Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper))
{
assert(input.device == Device::CUDA);
Tensor output = Tensor::zeros_like(input);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper itw(input);
CUDATensorWrapper otw(output);
hipLaunchKernelGGL(( kernel_func), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, itw, otw);
CHECK(hipGetLastError());
return std::move(output);
}
Tensor _elementwise_activation_backward(
const Tensor & grad,
const Tensor & input,
void kernel_func(CUDATensorWrapper, CUDATensorWrapper, CUDATensorWrapper))
{
assert(grad.device == Device::CUDA);
assert(input.device == Device::CUDA);
Tensor output = Tensor::zeros_like(grad);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper itw(input);
CUDATensorWrapper otw(output);
CUDATensorWrapper gtw(grad);
hipLaunchKernelGGL(( kernel_func), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, gtw, itw, otw);
CHECK(hipGetLastError());
return std::move(output);
}
} | cc7466ce913d9e854f8f9598fd5be5261fb91757.cu | // STD
#include <cassert>
#include <iostream>
// CUDA
#include <cooperative_groups.h>
// NNCPP
#include "tensor.hpp"
#include "activations.hpp"
#include "cuda_tensor_wrapper.cuh"
#include "cuda_utils.cuh"
namespace cg = cooperative_groups;
namespace nncpp
{
__global__ void kerner_relu(CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_relu_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_sigmoid(CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_sigmoid_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output);
__global__ void kerner_softmax(CUDATensorWrapper input, size_t dim, CUDATensorWrapper output, CUDATensorWrapper buffer);
void _elementwise_activation_inplace(Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper));
Tensor _elementwise_activation(const Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper));
Tensor _elementwise_activation_backward(const Tensor & grad, const Tensor & input,
void kernel_func(CUDATensorWrapper, CUDATensorWrapper, CUDATensorWrapper));
void relu_(Tensor & input)
{
_elementwise_activation_inplace(input, kerner_relu);
}
Tensor relu(const Tensor & input)
{
return _elementwise_activation(input, kerner_relu);
}
void sigmoid_(Tensor & input)
{
_elementwise_activation_inplace(input, kerner_sigmoid);
}
Tensor sigmoid(const Tensor & input)
{
return _elementwise_activation(input, kerner_sigmoid);
}
void softmax_(Tensor & input, size_t dim)
{
assert(dim < 4);
assert(input.device == Device::CUDA);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper tw(input);
size_t shape[]{input.shape[0], input.shape[1], input.shape[2], input.shape[3]};
shape[dim] = 1;
auto buffer = Tensor::zeros(shape[0], shape[1], shape[2], shape[3], Device::CUDA);
CUDATensorWrapper buffertw(buffer);
kerner_softmax<<<grid_size, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(tw, dim, tw, buffertw);
CHECK(cudaGetLastError());
}
Tensor softmax(const Tensor & input, size_t dim)
{
assert(dim < 4);
assert(input.device == Device::CUDA);
Tensor output = Tensor::zeros_like(input);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper itw(input);
CUDATensorWrapper otw(output);
size_t shape[]{input.shape[0], input.shape[1], input.shape[2], input.shape[3]};
shape[dim] = 1;
auto buffer = Tensor::zeros(shape[0], shape[1], shape[2], shape[3], Device::CUDA);
CUDATensorWrapper buffertw(buffer);
kerner_softmax<<<grid_size, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(itw, dim, otw, buffertw);
CHECK(cudaGetLastError());
return std::move(output);
}
Tensor ReLU::forward(const Tensor & t)
{
_context.clear();
_context.push_back(t);
return relu(t);
}
Tensor ReLU::backward(const Tensor & grad)
{
assert(!_context.empty());
auto input = _context[0];
return _elementwise_activation_backward(grad, input, kerner_relu_backward);
}
Tensor Sigmoid::forward(const Tensor & t)
{
_context.clear();
_context.push_back(t);
return sigmoid(t);
}
Tensor Sigmoid::backward(const Tensor & grad)
{
assert(!_context.empty());
auto input = _context[0];
return _elementwise_activation_backward(grad, input, kerner_sigmoid_backward);
}
__global__ void kerner_relu(CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
if (input.at(i) < 0.0f)
{
output.at(i) = 0.0f;
}
else if (input.const_data() != output.data())
{ // copy data if output is not input
output.at(i) = input.at(i);
}
}
}
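// In-place dispatch note: _elementwise_activation_inplace (defined near the end of this
// file) launches kernel_func<<<...>>>(tw, tw), i.e. the same wrapper as both input and
// output, so the const_data() != data() test above skips the redundant copy for relu_();
// relu() passes distinct wrappers, and the copy branch writes non-negative values through.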
__global__ void kerner_relu_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
float value = grad.at(i);
if (input.at(i) < 0.0f)
{
value = 0.0f;
}
output.at(i) = value;
}
}
__global__ void kerner_sigmoid(CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
float exp_i = expf(input.at(i));
output.at(i) = exp_i / (1.0f + exp_i);
}
}
__global__ void kerner_sigmoid_backward(CUDATensorWrapper grad, CUDATensorWrapper input, CUDATensorWrapper output)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < input.numel())
{
float value = input.at(i);
output.at(i) = -1.0f * expf(value) * expm1f(value) * grad.at(i);
}
}
__global__ void kerner_softmax(CUDATensorWrapper input, size_t dim, CUDATensorWrapper output, CUDATensorWrapper buffer)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// softmax(input, dim) = exp(input - max(input)) / sum(exp(input - max(input)), dim)
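// Numeric sanity check (illustrative values): for x = [1, 2, 3], x - max(x) = [-2, -1, 0],
// exp gives [0.135, 0.368, 1.000] with sum 1.503, so softmax(x) ~ [0.090, 0.245, 0.665];
// subtracting the max keeps every exponent <= 0, which avoids overflow for large inputs.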
// a) compute max(input)
_kernel_reduce_op(input, buffer, op_max, nullptr, _atomicMax);
cg::sync(cta);
// b.1) compute output = exp(input - max(input))
float max_input = buffer.at(0);
uint gridSize = blockDim.x * gridDim.x;
{
size_t outputNumel = output.numel();
uint i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < outputNumel)
{
output.at(i) = expf(input.at(i) - max_input);
i += gridSize;
}
}
cg::sync(cta);
// b.2) compute buffer = sum(output, dim)
// initialize buffer to zero:
buffer.at(0) = 0.0f;
_kernel_reduce_op_on_dim(output, dim, buffer, op_sum);
cg::sync(cta);
// c) compute output / buffer
// output.shape=(N,C,H,W); buffer has the same shape, with size 1 along dim
{
size_t bufferNumel = buffer.numel();
uint i = blockIdx.x * blockDim.x + threadIdx.x;
size_t size = output.shape[dim];
float denom;
size_t bufIndices[4], outIndex;
while (i < bufferNumel)
{
denom = buffer.at(i);
buffer.convert_from_linear(i, bufIndices);
for (size_t j = 0; j < size; j++)
{
bufIndices[dim] = j;
outIndex = output.convert_to_linear(bufIndices);
output.at(outIndex) = output.at(outIndex) / denom;
}
i += gridSize;
}
}
cg::sync(cta);
}
void _elementwise_activation_inplace(Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper))
{
assert(input.device == Device::CUDA);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper tw(input);
kernel_func<<<grid_size, BLOCK_SIZE>>>(tw, tw);
CHECK(cudaGetLastError());
}
Tensor _elementwise_activation(const Tensor & input, void kernel_func(CUDATensorWrapper, CUDATensorWrapper))
{
assert(input.device == Device::CUDA);
Tensor output = Tensor::zeros_like(input);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper itw(input);
CUDATensorWrapper otw(output);
kernel_func<<<grid_size, BLOCK_SIZE>>>(itw, otw);
CHECK(cudaGetLastError());
return std::move(output);
}
Tensor _elementwise_activation_backward(
const Tensor & grad,
const Tensor & input,
void kernel_func(CUDATensorWrapper, CUDATensorWrapper, CUDATensorWrapper))
{
assert(grad.device == Device::CUDA);
assert(input.device == Device::CUDA);
Tensor output = Tensor::zeros_like(grad);
int grid_size = setup_grid_size(input.numel(), BLOCK_SIZE);
CUDATensorWrapper itw(input);
CUDATensorWrapper otw(output);
CUDATensorWrapper gtw(grad);
kernel_func<<<grid_size, BLOCK_SIZE>>>(gtw, itw, otw);
CHECK(cudaGetLastError());
return std::move(output);
}
} |
218c017650bede1894699f3bb0504e56222e2c3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex hipfftDoubleComplex
#define Real double
#define Transform HIPFFT_Z2Z
#define TransformExec hipfftExecZ2Z
#else
#define Complex hipfftComplex
#define Real float
#define Transform HIPFFT_C2C
#define TransformExec hipfftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
__global__ void zero(int nx, int ny, int nz, Real *z) {
int tj = threadIdx.x;
int td = blockDim.x;
int blockData = (nx*ny*nz)/(gridDim.x*gridDim.y);
int jj = ((blockIdx.y)*gridDim.x + (blockIdx.x))*blockData;
for (int k=0; k<blockData/td; k++) {
z[jj + tj+ k*td] = 0.0;
}
} | 218c017650bede1894699f3bb0504e56222e2c3f.cu | #include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
__global__ void zero(int nx, int ny, int nz, Real *z) {
int tj = threadIdx.x;
int td = blockDim.x;
int blockData = (nx*ny*nz)/(gridDim.x*gridDim.y);
int jj = ((blockIdx.y)*gridDim.x + (blockIdx.x))*blockData;
for (int k=0; k<blockData/td; k++) {
z[jj + tj+ k*td] = 0.0;
}
} |
471f4639a663c5c80bc6563831ec828cde7df683.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cudaconv2.cuh>
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
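/*
 * Worked example of the 16-bit packing used by the kernels below (illustrative numbers,
 * assuming filterSize = 5): pxDivs stores each filter pixel as (row << 16) + col, so
 * pixel 13 (row 13/5 = 2, col 13%5 = 3) becomes (2 << 16) + 3 = 0x00020003; HI16 then
 * recovers the row (2) and LO16 the column (3), avoiding a div/mod in the inner loops.
 */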
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
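/*
 * Illustrative index decomposition for this kernel (example launch only): with B_X = 32,
 * B_Y = 4, pixelsPerThread = 8, numFilters = 64, numModules = 36 and partialSum = 9,
 * filterBlocksPerModule = 64/32 = 2, so blockIdx.x = 5 selects module chunk 5/2 = 2
 * (modules 18..26) and filter batch 5%2 = 1 (filters 32..63), while blockIdx.y = 1 gives
 * blockPixelOffset = 1*4*8 = 32, i.e. this block accumulates filter pixels 32..63.
 */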
template <int B_Y, int B_X, int pixelsPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelsPerThread * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / B_X;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = B_X * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
prod[c][p] = 0;
}
}
__shared__ int pxDivs[B_Y*pixelsPerThread];
if (tidx < B_Y * pixelsPerThread) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y * pixelsPerThread) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y * pixelsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelsPerThread) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_X % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
prod[c][p] += shImages[threadIdx.y + p * B_Y + c * pixelsPerThread * B_Y][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters] + scaleOutputs * prod[c][p];
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numImgColors/numGroups must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numGroups
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
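/*
 * Illustrative blockIdx.y decomposition (example numbers only): with B_Y = 8,
 * colorsPerThread = 8, numFilterColors = 16 and filterPixels = 25 there are 16/8 = 2
 * color batches and DIVUP(25, 8) = 4 pixel batches, so gridDim.y = 8; blockIdx.y = 5
 * then selects pixel batch 5/2 = 2 (blockPixelOffset = 16) and color batch 5%2 = 1
 * (filterColorIdx = 8, i.e. filter colors 8..15 of the block's group).
 */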
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y pixels for each of colorsPerThread colors
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X * filtersPerThread hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
if (tidx < B_Y) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + (blockPixelOffset + tidx) % filterSize;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numFilterColors must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numFilterColors
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModules, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
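/*
 * Illustrative colorIndices gather (example numbers only): with numGroups = 2,
 * numFilterColors = 4 and colorsPerThread = 4, a block in group 1 with filterColorIdx = 0
 * preloads shColors[c] = colorIndices[1*4 + c] * imgPixels * imgStride, so the image loads
 * become images[caseIdx + shColors[c] + pixIdx]: filter color c of that group reads image
 * color colorIndices[4 + c] rather than a fixed contiguous slice of image colors.
 */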
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf_rand(float* images, float* hidActs, float* targets, int* colorIndices,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numFilterColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y pixels for each of colorsPerThread colors
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X * filtersPerThread hidActs
__shared__ int shColors[colorsPerThread];
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
// const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
if (tidx < B_Y) {
pxDivs[tidx] = ((blockPixelOffset + tidx) / filterSize << 16) + ((blockPixelOffset + tidx) % filterSize);
}
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * imgStride;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + shColors[c] + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
* targets: (numModuleY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* TODO: you can get a slight speed boost for local non-convolutional units by writing special
* routines for partialSum = 1. But I dunno if the code duplication is worth it...
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
* Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
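/*
 * Illustrative partialSum bookkeeping (example numbers only): partialSum = 0 is rewritten
 * below to partialSum = numModules, giving a single output chunk summed over every module.
 * With numModulesY = numModulesX = 6 (numModules = 36) and partialSum = 4, targets is
 * resized to 36/4 = 9 row-chunks of numFilterColors * filterPixels rows by numFilters
 * columns, one partially-summed weight gradient per group of 4 consecutive modules.
 */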
void _weightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
int numFilterColors = numImgColors / numGroups;
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int pixelsPerThread, filtersPerThread, colorsPerThread;
// Worth playing with these parameters to find the best values for your problem.
// These values work relatively well, but they are not optimal for all problems.
if (numFilterColors > 3) {
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
} else {
assert(numGroups == 1); // Just for sanity
pixelsPerThread = numFilters % 32 == 0 ? (numImgColors == 1 ? 8 : 5) : (numImgColors == 1 ? 5 : 2);
by = numFilters % 32 == 0 ? 4 : 8; // by == 4 seems to work best
bx = numFilters % 32 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/bx), DIVUP(filterPixels, by*pixelsPerThread));
}
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
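// Illustrative launch shape for the <= 3 color path (example numbers only): with
// numImgColors = 3, numFilters = 64, filterSize = 5 (filterPixels = 25), numModules = 36
// and partialSum = 36, the branch above picks pixelsPerThread = 5, by = 4, bx = 32, so
// blocks = dim3((36/36)*(64/32), DIVUP(25, 4*5)) = dim3(2, 2), threads = dim3(32, 4), and
// (by*bx) % preloadCases == 128 % 32 == 0 as required. checkCaseBounds is set only when
// numImages is not a multiple of 32 (the preloadCases granularity): numImages = 100 takes
// the bounds-checked kernels, numImages = 128 the faster unchecked ones.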
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (numFilterColors > 3) {
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
} else { // numColors in 1,2,3
if (scaleTargets == 0) { // do not scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
}
}
cuvSafeCall(hipDeviceSynchronize());
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1,
scaleTargets, scaleOutput);
}
/*
* images: (numImgColors, imgPixels, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
* targets: (numModules/partialSum, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
* Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
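/*
 * Illustrative grouping constraints (example numbers only): numImgColors = 8, numGroups = 4,
 * numFilterColors = 4 passes the asserts below, since 8 % 4 == 0 (numImgColors divisible by
 * numFilterColors) and (4*4) % 8 == 0; each of the 4 groups then reads its own 4 image
 * colors through dColorIndices instead of owning a fixed contiguous slice of the input.
 */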
void _weightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numGroups > 1);
assert(numImgColors % numFilterColors == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int filtersPerThread, colorsPerThread;
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
cuvSafeCall(hipDeviceSynchronize());
}
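/*
 * Illustrative sketch (not part of the original source): a host-side
 * restatement of the block-sizing heuristics used by _weightActsSparse just
 * above (the multi-color path of _weightActs uses the same arithmetic). The
 * struct and function names are hypothetical; only the arithmetic is taken
 * from the original code.
 */
struct WeightActsLaunchHeuristics {
    int bx, by, filtersPerThread, colorsPerThread;
};
static inline WeightActsLaunchHeuristics pickWeightActsBlocks(int numFiltersPerGroup, int numFilterColors) {
    WeightActsLaunchHeuristics h;
    h.filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1; // two output filters per thread when possible
    h.colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;      // eight filter colors per thread when possible
    h.by = numFiltersPerGroup % 64 == 0 ? 4 : 8;               // (by, bx) = (4, 32) for wide filter banks,
    h.bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;             // (8, 16) otherwise
    return h;
}
/*
 * With these choices by * bx is always 128, so the (by * bx) % preloadCases == 0
 * assertion holds for preloadCases = 32.
 */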
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, 0);
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, 1, 0);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, scaleTargets, scaleOutput);
}
| 471f4639a663c5c80bc6563831ec828cde7df683.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cudaconv2.cuh>
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelsPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelsPerThread * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / B_X;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = B_X * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
prod[c][p] = 0;
}
}
__shared__ int pxDivs[B_Y*pixelsPerThread];
if (tidx < B_Y * pixelsPerThread) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y * pixelsPerThread) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
 * This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y * pixelsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelsPerThread) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_X % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
prod[c][p] += shImages[threadIdx.y + p * B_Y + c * pixelsPerThread * B_Y][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters] + scaleOutputs * prod[c][p];
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
}
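/*
 * Illustrative note (not part of the original source): the pxDivs arrays used
 * by the kernels in this file pack each filter pixel's (row, col) pair into a
 * single int so the inner loops never divide: row = px / filterSize goes into
 * the high 16 bits and col = px % filterSize into the low 16 bits, recovered
 * with the HI16/LO16 macros defined at the top of this file. Worked example
 * for filterSize = 5, px = 13: row = 2, col = 3, packed = (2 << 16) + 3 =
 * 131075, HI16(131075) = 2, LO16(131075) = 3. The helper below is a
 * hypothetical host-side restatement of that packing, for reference only.
 */
static inline int packFilterPixel(int px, int filterSize) {
    return ((px / filterSize) << 16) + (px % filterSize); // same packing as pxDivs[]
}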
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
 * blockIdx.y.y = 0...DIVUP(filterPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numImgColors/numGroups must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numGroups
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y pixels x colorsPerThread colors
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
if (tidx < B_Y) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + (blockPixelOffset + tidx) % filterSize;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
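/*
 * Illustrative sketch (not part of the original source): how
 * conv_weight_acts_mc_mf above splits blockIdx.y into a filter-pixel batch and
 * a color batch; the helper is a hypothetical host-side restatement of the two
 * lines computing blockPixelOffset and filterColorIdx. Worked example with
 * B_Y = 8, filterPixels = 25 (5x5 filter), numFilterColors = 16 and
 * colorsPerThread = 8: there are 16/8 = 2 color batches and DIVUP(25, 8) = 4
 * pixel batches, so gridDim.y = 8; blockIdx.y = 5 then maps to
 * blockPixelOffset = (5 / 2) * 8 = 16 and filterColorIdx = (5 % 2) * 8 = 8.
 */
static inline void decodeWeightActsBlockY(int blockY, int numFilterColors, int colorsPerThread, int B_Y,
                                          int* blockPixelOffset, int* filterColorIdx) {
    const int colorBatches = numFilterColors / colorsPerThread;
    *blockPixelOffset = (blockY / colorBatches) * B_Y;           // which slab of B_Y filter pixels
    *filterColorIdx = (blockY % colorBatches) * colorsPerThread; // which slab of colorsPerThread colors
}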
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numFilterColors must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numFilterColors
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
 * targets: (numModules/partialSum, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf_rand(float* images, float* hidActs, float* targets, int* colorIndices,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numFilterColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y pixels x colorsPerThread colors
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
__shared__ int shColors[colorsPerThread];
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
// const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
if (tidx < B_Y) {
pxDivs[tidx] = ((blockPixelOffset + tidx) / filterSize << 16) + ((blockPixelOffset + tidx) % filterSize);
}
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * imgStride;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + shColors[c] + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
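/*
 * Illustrative note (not part of the original source): conv_weight_acts_mc_mf_rand
 * above converts each sparse color index into a precomputed element offset,
 * shColors[c] = colorIndices[...] * imgPixels * imgStride, so the inner-loop
 * load images[caseIdx + shColors[c] + pixIdx] walks the documented
 * (numImgColors, imgPixels, numImages) layout without touching the index
 * array again. The helper below is a hypothetical restatement of that offset.
 */
static inline int sparseColorOffset(int colorChannel, int imgPixels, int imgStride) {
    return colorChannel * imgPixels * imgStride; // each color channel spans imgPixels * imgStride elements of `images`
}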
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
 * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* TODO: you can get a slight speed boost for local non-convolutional units by writing special
* routines for partialSum = 1. But I dunno if the code duplication is worth it...
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _weightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
int numFilterColors = numImgColors / numGroups;
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int pixelsPerThread, filtersPerThread, colorsPerThread;
// Worth playing with these parameters to find best values for your problem.
// These values work relatively well, but not optimal for all problems.
if (numFilterColors > 3) {
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
} else {
assert(numGroups == 1); // Just for sanity
pixelsPerThread = numFilters % 32 == 0 ? (numImgColors == 1 ? 8 : 5) : (numImgColors == 1 ? 5 : 2);
by = numFilters % 32 == 0 ? 4 : 8; // by == 4 seems to work best
bx = numFilters % 32 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/bx), DIVUP(filterPixels, by*pixelsPerThread));
}
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (numFilterColors > 3) {
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
} else { // numColors in 1,2,3
if (scaleTargets == 0) { // do not scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
}
}
cuvSafeCall(cudaThreadSynchronize());
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1,
scaleTargets, scaleOutput);
}
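/*
 * Minimal usage sketch (hypothetical NVMatrix objects, not part of the original
 * source): with device matrices laid out as described for the sparse variant
 * below, i.e. images of shape (numImgColors * imgPixels, numImages) and
 * hidActs of shape (numFilters * numModules, numImages),
 *
 *     NVMatrix wGrad;
 *     convWeightActs(images, hidActs, wGrad, imgSizeY, numModulesY, numModulesX,
 *                    filterSize, paddingStart, moduleStride, numImgColors,
 *                    numGroups, partialSum);
 *
 * fills wGrad with the convolutional weight gradients; the localWeightActs
 * overloads do the same for locally-connected (untied) filters.
 */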
/*
* images: (numImgColors, imgPixels, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
* targets: (numModules/partialSum, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
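/*
 * Worked example (hypothetical numbers, not from the original source): with
 * numModulesY = numModulesX = 6 (numModules = 36), partialSum = 4,
 * numFilterColors = 4, filterSize = 5 (filterPixels = 25) and numFilters = 64,
 * targets has (36/4) * 4 * 25 = 900 rows and 64 columns; callers typically sum
 * over the 9 partial-sum chunks to obtain the final
 * (numFilterColors * filterPixels) x numFilters weight gradient.
 */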
void _weightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numGroups > 1);
assert(numImgColors % numFilterColors == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int filtersPerThread, colorsPerThread;
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
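    // preloadCases is 32, so per-image bounds checks are only needed when
    // numImages is not a multiple of 32. Below, scaleTargets == 0 means targets
    // is (re)allocated and simply overwritten; otherwise its existing contents
    // are kept and blended with the new results via scaleTargets / scaleOutput.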
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
    } else { // do scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
cuvSafeCall(cudaThreadSynchronize());
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, 0);
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, 1, 0);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, scaleTargets, scaleOutput);
}
|
32352ce8c6c85819f6b39c0b6b107d0782d36526.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <string.h>
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
void readInput(char *fileName, image &img) {
FILE *inputImageFile;
inputImageFile = fopen(fileName, "rb");
img.pType = new char[3];
    fgets(img.pType, 3, inputImageFile); // pType holds 3 bytes; sizeof on the char* pointer would read past the buffer
char c = getc(inputImageFile);
while (c == '#') {
while (getc(inputImageFile) != '\n');
c = getc(inputImageFile);
}
ungetc(c, inputImageFile);
fscanf(inputImageFile, "%d%d", &img.width, &img.height);
fscanf(inputImageFile, "%d", &img.maxValColor);
while (fgetc(inputImageFile) != '\n');
if (img.pType[1] == '5') {
img.data = (unsigned char*)malloc(img.height * img.width);
fread(img.data, sizeof(unsigned char), img.height * img.width, inputImageFile);
}
fclose(inputImageFile);
}
/**
Writes an image to the output file.
*/
void writeData(const char *fileName, image img) {
FILE *outputImageFile;
outputImageFile = fopen(fileName, "wb");
fprintf(outputImageFile, "%s\n", img.pType);
fprintf(outputImageFile, "%d %d\n", img.width, img.height);
fprintf(outputImageFile, "%d\n", img.maxValColor);
fwrite(img.data, sizeof(unsigned char), img.height * img.width, outputImageFile);
fclose(outputImageFile);
}
/**
Copies generic data from the input image to output image
*/
void copyPropertiesToImage(image i, image &o) {
o.pType = new char[3];
strcpy(o.pType, "P5");
o.width = i.width;
o.height = i.height;
o.maxValColor = i.maxValColor;
}
__global__ void applyGaussianFilter(unsigned char *input, unsigned char *output, float *kernel, int iHeight, int iWidth, int kWidth) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
double sum = 0.0;
int halvedKW = kWidth / 2;
for (int i = -halvedKW; i <= halvedKW; i++) {
for (int j = -halvedKW; j <= halvedKW; j++) {
if ((x + j) < iWidth && (x + j) >= 0 && (y + i) < iHeight && (y + i) >= 0) {
int kPosX = (j + halvedKW);
int kPosY = (i + halvedKW);
sum = sum + (float)(input[(y + i) * iWidth + (x + j)]) * kernel[kPosY * kWidth + kPosX];
}
}
}
if (sum > 255.0)
sum = 255.0;
output[y * iWidth + x] = (unsigned char)sum;
}
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
}
int main(int argc, char *argv[]) {
cout << argv[1] << endl;
image input, output;
readInput(argv[1], input);
float gaussianKernel[] = {
1.0 / 273.0, 4.0 / 273.0, 7.0 / 273.0, 4.0 / 273.0, 1.0 / 273.0,
4.0 / 273.0, 16.0 / 273.0, 26.0 / 273.0, 16.0 / 273.0, 4.0 / 273.0,
7.0 / 273.0, 26.0 / 273.0, 41.0 / 273.0, 26.0 / 273.0, 7.0 / 273.0,
4.0 / 273.0, 16.0 / 273.0, 26.0 / 273.0, 16.0 / 273.0, 4.0 / 273.0,
1.0 / 273.0, 4.0 / 273.0, 7.0 / 273.0, 4.0 / 273.0, 1.0 / 273.0
};
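    // The weights above are the classic 5x5 Gaussian taps divided by their sum
    // (273), so the blur preserves overall image brightness.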
unsigned char *d_gaussInput, *d_gaussOutput, *d_sobelOutput;
float *d_gaussKernel, *d_gradDirections;
int imgRes = input.height * input.width;
dim3 blocks(input.width / 16, input.height / 16);
dim3 threads(16, 16);
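    // Note: the integer divisions above truncate, so when the image width or
    // height is not a multiple of 16 the rightmost/bottom edge pixels fall
    // outside the grid and are left unprocessed.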
hipMalloc(&d_gaussInput, imgRes);
hipMalloc(&d_gaussOutput, imgRes);
hipMalloc(&d_gaussKernel, GAUSS_WIDTH * GAUSS_WIDTH * sizeof(float));
hipMemcpy(d_gaussInput, input.data, imgRes, hipMemcpyHostToDevice);
hipMemcpy(d_gaussKernel, gaussianKernel, GAUSS_WIDTH * GAUSS_WIDTH * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( applyGaussianFilter) , dim3(blocks), dim3(threads) , 0, 0, d_gaussInput, d_gaussOutput, d_gaussKernel, input.height, input.width, GAUSS_WIDTH);
hipMalloc(&d_sobelOutput, imgRes);
hipMalloc(&d_gradDirections, imgRes * sizeof(float));
hipMemcpy(d_sobelOutput, d_gaussOutput, imgRes, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( applySobelFilter) , dim3(blocks), dim3(threads) , 0, 0, d_gaussOutput, d_sobelOutput, d_gradDirections, input.height, input.width);
copyPropertiesToImage(input, output);
output.data = (unsigned char*)malloc(output.height * output.width);
hipMemcpy(output.data, d_sobelOutput, imgRes, hipMemcpyDeviceToHost);
hipFree(d_gaussKernel);
hipFree(d_gaussInput);
hipFree(d_gaussOutput);
hipFree(d_sobelOutput);
hipFree(d_gradDirections);
writeData(argv[2], output);
system("pause");
return 0;
} | 32352ce8c6c85819f6b39c0b6b107d0782d36526.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <string.h>
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
void readInput(char *fileName, image &img) {
FILE *inputImageFile;
inputImageFile = fopen(fileName, "rb");
img.pType = new char[3];
    fgets(img.pType, 3, inputImageFile); // pType holds 3 bytes; sizeof on the char* pointer would read past the buffer
char c = getc(inputImageFile);
while (c == '#') {
while (getc(inputImageFile) != '\n');
c = getc(inputImageFile);
}
ungetc(c, inputImageFile);
fscanf(inputImageFile, "%d%d", &img.width, &img.height);
fscanf(inputImageFile, "%d", &img.maxValColor);
while (fgetc(inputImageFile) != '\n');
if (img.pType[1] == '5') {
img.data = (unsigned char*)malloc(img.height * img.width);
fread(img.data, sizeof(unsigned char), img.height * img.width, inputImageFile);
}
fclose(inputImageFile);
}
/**
Writes an image to the output file.
*/
void writeData(const char *fileName, image img) {
FILE *outputImageFile;
outputImageFile = fopen(fileName, "wb");
fprintf(outputImageFile, "%s\n", img.pType);
fprintf(outputImageFile, "%d %d\n", img.width, img.height);
fprintf(outputImageFile, "%d\n", img.maxValColor);
fwrite(img.data, sizeof(unsigned char), img.height * img.width, outputImageFile);
fclose(outputImageFile);
}
/**
Copies generic data from the input image to output image
*/
void copyPropertiesToImage(image i, image &o) {
o.pType = new char[3];
strcpy(o.pType, "P5");
o.width = i.width;
o.height = i.height;
o.maxValColor = i.maxValColor;
}
__global__ void applyGaussianFilter(unsigned char *input, unsigned char *output, float *kernel, int iHeight, int iWidth, int kWidth) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
double sum = 0.0;
int halvedKW = kWidth / 2;
for (int i = -halvedKW; i <= halvedKW; i++) {
for (int j = -halvedKW; j <= halvedKW; j++) {
if ((x + j) < iWidth && (x + j) >= 0 && (y + i) < iHeight && (y + i) >= 0) {
int kPosX = (j + halvedKW);
int kPosY = (i + halvedKW);
sum = sum + (float)(input[(y + i) * iWidth + (x + j)]) * kernel[kPosY * kWidth + kPosX];
}
}
}
if (sum > 255.0)
sum = 255.0;
output[y * iWidth + x] = (unsigned char)sum;
}
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
}
int main(int argc, char *argv[]) {
cout << argv[1] << endl;
image input, output;
readInput(argv[1], input);
float gaussianKernel[] = {
1.0 / 273.0, 4.0 / 273.0, 7.0 / 273.0, 4.0 / 273.0, 1.0 / 273.0,
4.0 / 273.0, 16.0 / 273.0, 26.0 / 273.0, 16.0 / 273.0, 4.0 / 273.0,
7.0 / 273.0, 26.0 / 273.0, 41.0 / 273.0, 26.0 / 273.0, 7.0 / 273.0,
4.0 / 273.0, 16.0 / 273.0, 26.0 / 273.0, 16.0 / 273.0, 4.0 / 273.0,
1.0 / 273.0, 4.0 / 273.0, 7.0 / 273.0, 4.0 / 273.0, 1.0 / 273.0
};
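    // The weights above are the classic 5x5 Gaussian taps divided by their sum
    // (273), so the blur preserves overall image brightness.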
unsigned char *d_gaussInput, *d_gaussOutput, *d_sobelOutput;
float *d_gaussKernel, *d_gradDirections;
int imgRes = input.height * input.width;
dim3 blocks(input.width / 16, input.height / 16);
dim3 threads(16, 16);
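    // Note: the integer divisions above truncate, so when the image width or
    // height is not a multiple of 16 the rightmost/bottom edge pixels fall
    // outside the grid and are left unprocessed.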
cudaMalloc(&d_gaussInput, imgRes);
cudaMalloc(&d_gaussOutput, imgRes);
cudaMalloc(&d_gaussKernel, GAUSS_WIDTH * GAUSS_WIDTH * sizeof(float));
cudaMemcpy(d_gaussInput, input.data, imgRes, cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussKernel, gaussianKernel, GAUSS_WIDTH * GAUSS_WIDTH * sizeof(float), cudaMemcpyHostToDevice);
applyGaussianFilter <<< blocks, threads >>> (d_gaussInput, d_gaussOutput, d_gaussKernel, input.height, input.width, GAUSS_WIDTH);
cudaMalloc(&d_sobelOutput, imgRes);
cudaMalloc(&d_gradDirections, imgRes * sizeof(float));
cudaMemcpy(d_sobelOutput, d_gaussOutput, imgRes, cudaMemcpyDeviceToDevice);
applySobelFilter <<< blocks, threads >>> (d_gaussOutput, d_sobelOutput, d_gradDirections, input.height, input.width);
copyPropertiesToImage(input, output);
output.data = (unsigned char*)malloc(output.height * output.width);
cudaMemcpy(output.data, d_sobelOutput, imgRes, cudaMemcpyDeviceToHost);
cudaFree(d_gaussKernel);
cudaFree(d_gaussInput);
cudaFree(d_gaussOutput);
cudaFree(d_sobelOutput);
cudaFree(d_gradDirections);
writeData(argv[2], output);
system("pause");
return 0;
} |
fb3747ed20188b22da9ebd37481e6c707a9f4e64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
* Copyright (C) 2020 *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
@author Broux Paul-Emmanuel <[email protected]>
*/
#define BENCH_ON
#include <stdio.h>
#include <cstdlib>
#include "utils.h"
#include "typedef.h"
#include "encryptKernelECB.h"
#include "decryptKernelECB.h"
#include "sboxE.h"
#include "sboxD.h"
int main(int argc, char * argv[]) {
///////////////////////////////////////////////////////////////
// command line arguments
///////////////////////////////////////////////////////////////
int warm_up_device = 0; // GPU kernel warm up
    int threadNum = 512;            // Threads per block. This is a recommended number.
int blockNum = 0; // Number of blocks in the grid
int streamNum = 2; // Number of streams to create for overlapping
int mode = 1; // Encryption mode, 1 to encrypt or 0 to decrypt.
char * filename;
char * keyfilename;
std::cout << std::endl << "********************************************************************" ;
std::cout << std::endl << "****** AES-128 CUDA ******" ;
std::cout << std::endl << "****** Overlapping ******" ;
std::cout << std::endl << "********************************************************************" << std::endl << std::endl;
if (argc > 1){
for( int n=1 ; n<argc ; n=n+2 ) {
if((strcmp(argv[n],"-wuDevice") == 0) && (n+1<argc)) {
warm_up_device = atoi(argv[n+1]);
}
else if((strcmp(argv[n],"-threadNum") == 0) && (n+1<argc)) {
threadNum = atoi(argv[n+1]);
if(threadNum ==0) {
printf("\n threadNum must be a non-null value.\n");
exit(1);
}
}
else if((strcmp(argv[n],"-blockNum") == 0) && (n+1<argc)) {
blockNum = atoi(argv[n+1]);
}
else if((strcmp(argv[n],"-streamNum") == 0) && (n+1<argc)) {
streamNum = atoi(argv[n+1]);
if(streamNum ==0) {
printf("\n streamNum must be a non-null value.\n");
exit(1);
}
if(streamNum >= 16) {
printf("\n streamNum can't exceed 15.\n");
exit(1);
}
}
else if((strcmp(argv[n],"-mode") == 0) && (n+1<argc)) {
mode = atoi(argv[n+1]);
}
else if((strcmp(argv[n],"-filename") == 0) && (n+1<argc)) {
filename = argv[n+1];
}
else if((strcmp(argv[n],"-keyfilename") == 0) && (n+1<argc)) {
keyfilename = argv[n+1];
}
else if((strcmp(argv[n],"-help") == 0)) {
                std::cout << " This is an AES-128 implementation." << std::endl;
                std::cout << " Available \"-option value\" arguments are:" << std::endl;
std::cout << " -mode, 1 to encrypt and 0 to decrypt. Default value is 1." << std::endl;
std::cout << " -filename, the file path to encrypt or decrypt." << std::endl;
                std::cout << " -keyfilename, the 128-bit key file's path to use for encryption or decryption." << std::endl;
std::cout << " -threadNum to set the number of threads per block. Default recommended value is 512." << std::endl;
std::cout << " -blockNum to set the number of blocks in the grid. Default value is 0 and will create" << std::endl << " enough blocks taking into account the input file size and the threadNum argument." << std::endl;
                std::cout << " -streamNum will set the number of streams and the number of chunks created from the plaintext. Default value is 2 and max is 15." << std::endl;
                std::cout << " -wuDevice number of device kernel launches before timing (around 1000 or less is advised) " << std::endl;
std::cout << " The order between options is not important." << std::endl << std::endl;
exit(0);
}
else {
std::cout << std::endl << "Argument " << argv[n] << " does not correspond to any valid arguments. Type -help for details about valid command line arguments." <<std::endl;
exit(1);
}
}
}
else {
std::cout << std::endl << std::endl << "Not enough arguments. Type -help option in the command line for further explanations." << std::endl;
exit(1);
}
std::cout << " mode = " << mode << std::endl;
std::cout << " threadNum = " << threadNum << std::endl;
std::cout << " blockNum = " << blockNum << std::endl;
std::cout << " streamNum = " << streamNum << std::endl;
std::cout << " wuDevice = " << warm_up_device << std::endl << std::endl;
//Copying the key file
unsigned char key[16];
FILE * keyFile;
keyFile = fopen(keyfilename,"rb");
if (keyFile == NULL) {
perror ("Error opening file");
exit(1);
}
else {
for(int i=0 ; i<16 ; i+=4) {
if(fscanf(keyFile, "%x", (unsigned int *)&key[i]) != 1 ) {
perror ("Error reading keyfile. Make sure the key is hexadecimal words like \"0x01234567 0x89abcdef ...\" .\n");
exit(1);
}
}
}
fclose(keyFile);
// ***Key scheduling***
uint8 expkey[176];
ExpandKey (key, expkey);
hipMemcpyToSymbol(const_expkey, expkey, 176*sizeof(uint8)); //Moving the expanding key to constant memory
hipMemcpyToSymbol(const_IK0, IK0, 256*sizeof(uint32_t));
// ***Inputdata file to encrypt/decrypt***
//Checking for the size of the file
int filesize;
filesize = fsize(filename);
//CMS padding to have 16 bytes blocks of data
uint8 padElmt = 0;
uint8 streamPad = 0;
int mod16 = filesize%16;
if(mode){
padElmt = 16 - mod16; // We always add bytes for later padding detection
        mod16 = ((filesize+padElmt)/streamNum)%16; //padding for making each future chunk a multiple of 16
streamPad = streamNum*(16 - mod16);
}
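    // Two-stage padding: padElmt first rounds the plaintext up to whole
    // 16-byte AES blocks (always adding at least one byte so the pad length can
    // be recovered on decryption); streamPad then rounds it up again so that
    // each of the streamNum chunks handed to a stream is itself a multiple of 16.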
//Creating required arrays with page-locked memory
uint8 *hostInData;
checkCudaErrors(hipHostMalloc((void**)&hostInData, (filesize+padElmt+streamPad)*sizeof(uint8), hipHostMallocDefault));
//Opening the file
FILE * inputFile;
int result;
inputFile = fopen(filename,"rb");
if (inputFile == NULL) {
perror ("Error opening file");
exit(1);
}
result = fread (hostInData, sizeof(uint8), filesize, inputFile);
if(result != filesize) {
perror("Reading error from the input file");
exit(1);
}
fclose(inputFile);
//Padding
for (int i = 0; i < padElmt; i++) {
hostInData[filesize + i] = padElmt;
}
filesize += padElmt;
for (int i = 0; i < streamPad; i++) {
hostInData[filesize + i] = streamPad;
}
filesize += streamPad;
std::cout << " Data to treat with padding elements: " << filesize << " bytes." << std::endl;
//Determining grid size if not given
int size = filesize/streamNum; // To treat by each kernel launch
if(!blockNum) {
blockNum = 1+size/(threadNum*16);
}
else {
if(blockNum*threadNum* 16 < size) {
            std::cerr << std::endl << std::endl << "BlockNum and ThreadNum don't fit the data file to encrypt/decrypt. ";
exit(1);
}
}
    std::cout << " Grid size in terms of blocks: " << blockNum << std::endl;
//Streams creation
hipStream_t stream[streamNum];
for (int i = 0; i < streamNum; ++i)
hipStreamCreate(&stream[i]);
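    // Further down, chunk i is bound to stream[i]: its async H2D copy, kernel
    // launch and D2H copy are all issued on that stream, so transfers of one
    // chunk can overlap with computation on another (this is why the host
    // buffer was allocated as page-locked memory above).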
//Device vectors declarations and allocations
uint32_t * devInput, * devOutput, * dev_sm_te1, * dev_sm_te2, * dev_sm_te3, * dev_sm_te4;
uint8 * dev_sm_sbox;
hipMalloc( (void **) &devInput , filesize*sizeof(uint8));
hipMalloc( (void **) &devOutput , filesize*sizeof(uint8));
hipMalloc( (void **) &dev_sm_te1 , 256*sizeof(uint32_t));
hipMalloc( (void **) &dev_sm_te2 , 256*sizeof(uint32_t));
hipMalloc( (void **) &dev_sm_te3 , 256*sizeof(uint32_t));
hipMalloc( (void **) &dev_sm_te4 , 256*sizeof(uint32_t));
hipMalloc( (void **) &dev_sm_sbox , 256*sizeof(uint8));
//GPU + memory transfers time
hipEvent_t startHost, stopHost, delayHost;
checkCudaErrors(hipEventCreate(&startHost));
checkCudaErrors(hipEventCreate(&stopHost));
checkCudaErrors(hipEventCreate(&delayHost));
//To record device time execution
hipEvent_t startDevice, stopDevice;
checkCudaErrors(hipEventCreate(&startDevice));
checkCudaErrors(hipEventCreate(&stopDevice));
//Copy vectors from host memory to device memory
if(mode) {
hipMemcpy(dev_sm_te1 , TBox0 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_te2 , TBox1 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_te3 , TBox2 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_te4 , TBox3 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_sbox , sbox , 256*sizeof(uint8 ), hipMemcpyHostToDevice);
}
else {
hipMemcpy(dev_sm_te1 , TBoxi0 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_te2 , TBoxi1 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_te3 , TBoxi2 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_te4 , TBoxi3 , 256*sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(dev_sm_sbox , sbox_inv , 256*sizeof(uint8 ), hipMemcpyHostToDevice);
}
uint32_t *hostData = (uint32_t*)hostInData;
int word = size/4;
//Warm Up
hipMemcpy(devInput, hostInData, filesize*sizeof(uint8_t), hipMemcpyHostToDevice);
if(mode) {
for(int i=0; i < warm_up_device ; i++) {
hipLaunchKernelGGL(( encrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, 0, devInput, devOutput, filesize, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
}
}
else {
for(int i=0; i < warm_up_device ; i++) {
hipLaunchKernelGGL(( decrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, 0, devInput, devOutput, filesize, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
}
}
#ifdef BENCH_ON
if(mode) {
printf("\nBENCH_ON\n");
checkCudaErrors(hipEventRecord(startDevice, NULL));
for(int j=0; j<1000; j++){
for(int i=0; i < streamNum ; ++i) {
hipMemcpyAsync(devInput+i*word, hostData+i*word, size, hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( encrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, stream[i], devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
hipMemcpyAsync(hostData+i*word, devOutput+i*word, size, hipMemcpyDeviceToHost, stream[i]);
}
}
checkCudaErrors(hipEventRecord(stopDevice, NULL));
}
else {
checkCudaErrors(hipEventRecord(startDevice, NULL));
for(int j=0; j<1000; j++){
for(int i=0; i < streamNum ; ++i) {
hipMemcpyAsync(devInput+i*word, hostData+i*word, size, hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( decrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, stream[i], devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
hipMemcpyAsync(hostData+i*word, devOutput+i*word, size, hipMemcpyDeviceToHost, stream[i]);
}
}
checkCudaErrors(hipEventRecord(stopDevice, NULL));
}
#else
if(mode) {
checkCudaErrors(hipEventRecord(startDevice, NULL));
for(int i=0; i < streamNum ; ++i) {
hipMemcpyAsync(devInput+i*word, hostData+i*word, size, hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( encrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, stream[i], devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
hipMemcpyAsync(hostData+i*word, devOutput+i*word, size, hipMemcpyDeviceToHost, stream[i]);
}
checkCudaErrors(hipEventRecord(stopDevice, NULL));
}
else {
checkCudaErrors(hipEventRecord(startDevice, NULL));
for(int i=0; i < streamNum ; ++i) {
hipMemcpyAsync(devInput+i*word, hostData+i*word, size, hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( decrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, stream[i], devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
hipMemcpyAsync(hostData+i*word, devOutput+i*word, size, hipMemcpyDeviceToHost, stream[i]);
}
checkCudaErrors(hipEventRecord(stopDevice, NULL));
}
#endif
checkCudaErrors(hipEventSynchronize(stopDevice));
checkCudaErrors(hipDeviceSynchronize());
//Time calculation
float Devmsec = 0.0f;
checkCudaErrors(hipEventElapsedTime(&Devmsec, startDevice, stopDevice));
#ifdef BENCH_ON
Devmsec/= 1000;
#endif
double throughput = 1.0e-9f*8*filesize/(Devmsec*1.0e-3f);
printf("\n GPU processing time: %f (ms)", Devmsec);
printf("\n GPU throughput: %f (Gbps)\n", throughput);
//Writing results inside a file
FILE * outputFile;
outputFile = fopen("Result/result.dat","wb");
if (outputFile == NULL) {
perror ("Error opening file");
exit(1);
}
if(mode){
result = fwrite (hostInData, sizeof(uint8), filesize, outputFile);
if(result != filesize) {
            perror("Writing error to the output file");
exit(1);
}
}
else {
uint8 padTotal = hostInData[filesize - 1];
padTotal += hostInData[filesize - padTotal - 1];
result = fwrite (hostInData, sizeof(uint8), filesize-padTotal, outputFile);
if(result != filesize-padTotal) {
            perror("Writing error to the output file");
exit(1);
}
}
fclose(outputFile);
//free streams
for (int i = 0; i < streamNum; ++i)
hipStreamDestroy(stream[i]);
// Free device memory
checkCudaErrors( hipHostFree(hostInData) );
hipFree(devInput);
hipFree(devOutput);
hipFree(dev_sm_te1);
hipFree(dev_sm_te2);
hipFree(dev_sm_te3);
hipFree(dev_sm_te4);
hipFree(dev_sm_sbox);
return 0;
}
| fb3747ed20188b22da9ebd37481e6c707a9f4e64.cu |
/***************************************************************************
* Copyright (C) 2020 *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
@author Broux Paul-Emmanuel <[email protected]>
*/
#define BENCH_ON
#include <stdio.h>
#include <cstdlib>
#include "utils.h"
#include "typedef.h"
#include "encryptKernelECB.h"
#include "decryptKernelECB.h"
#include "sboxE.h"
#include "sboxD.h"
int main(int argc, char * argv[]) {
///////////////////////////////////////////////////////////////
// command line arguments
///////////////////////////////////////////////////////////////
int warm_up_device = 0; // GPU kernel warm up
    int threadNum = 512;            // Threads per block. This is a recommended number.
int blockNum = 0; // Number of blocks in the grid
int streamNum = 2; // Number of streams to create for overlapping
int mode = 1; // Encryption mode, 1 to encrypt or 0 to decrypt.
char * filename;
char * keyfilename;
std::cout << std::endl << "********************************************************************" ;
std::cout << std::endl << "****** AES-128 CUDA ******" ;
std::cout << std::endl << "****** Overlapping ******" ;
std::cout << std::endl << "********************************************************************" << std::endl << std::endl;
if (argc > 1){
for( int n=1 ; n<argc ; n=n+2 ) {
if((strcmp(argv[n],"-wuDevice") == 0) && (n+1<argc)) {
warm_up_device = atoi(argv[n+1]);
}
else if((strcmp(argv[n],"-threadNum") == 0) && (n+1<argc)) {
threadNum = atoi(argv[n+1]);
if(threadNum ==0) {
printf("\n threadNum must be a non-null value.\n");
exit(1);
}
}
else if((strcmp(argv[n],"-blockNum") == 0) && (n+1<argc)) {
blockNum = atoi(argv[n+1]);
}
else if((strcmp(argv[n],"-streamNum") == 0) && (n+1<argc)) {
streamNum = atoi(argv[n+1]);
if(streamNum ==0) {
printf("\n streamNum must be a non-null value.\n");
exit(1);
}
if(streamNum >= 16) {
printf("\n streamNum can't exceed 15.\n");
exit(1);
}
}
else if((strcmp(argv[n],"-mode") == 0) && (n+1<argc)) {
mode = atoi(argv[n+1]);
}
else if((strcmp(argv[n],"-filename") == 0) && (n+1<argc)) {
filename = argv[n+1];
}
else if((strcmp(argv[n],"-keyfilename") == 0) && (n+1<argc)) {
keyfilename = argv[n+1];
}
else if((strcmp(argv[n],"-help") == 0)) {
                std::cout << " This is an AES-128 implementation." << std::endl;
                std::cout << " Available \"-option value\" arguments are:" << std::endl;
std::cout << " -mode, 1 to encrypt and 0 to decrypt. Default value is 1." << std::endl;
std::cout << " -filename, the file path to encrypt or decrypt." << std::endl;
                std::cout << " -keyfilename, the 128-bit key file's path to use for encryption or decryption." << std::endl;
std::cout << " -threadNum to set the number of threads per block. Default recommended value is 512." << std::endl;
std::cout << " -blockNum to set the number of blocks in the grid. Default value is 0 and will create" << std::endl << " enough blocks taking into account the input file size and the threadNum argument." << std::endl;
                std::cout << " -streamNum will set the number of streams and the number of chunks created from the plaintext. Default value is 2 and max is 15." << std::endl;
                std::cout << " -wuDevice number of device kernel launches before timing (around 1000 or less is advised) " << std::endl;
std::cout << " The order between options is not important." << std::endl << std::endl;
exit(0);
}
else {
std::cout << std::endl << "Argument " << argv[n] << " does not correspond to any valid arguments. Type -help for details about valid command line arguments." <<std::endl;
exit(1);
}
}
}
else {
std::cout << std::endl << std::endl << "Not enough arguments. Type -help option in the command line for further explanations." << std::endl;
exit(1);
}
std::cout << " mode = " << mode << std::endl;
std::cout << " threadNum = " << threadNum << std::endl;
std::cout << " blockNum = " << blockNum << std::endl;
std::cout << " streamNum = " << streamNum << std::endl;
std::cout << " wuDevice = " << warm_up_device << std::endl << std::endl;
//Copying the key file
unsigned char key[16];
FILE * keyFile;
keyFile = fopen(keyfilename,"rb");
if (keyFile == NULL) {
perror ("Error opening file");
exit(1);
}
else {
for(int i=0 ; i<16 ; i+=4) {
if(fscanf(keyFile, "%x", (unsigned int *)&key[i]) != 1 ) {
perror ("Error reading keyfile. Make sure the key is hexadecimal words like \"0x01234567 0x89abcdef ...\" .\n");
exit(1);
}
}
}
fclose(keyFile);
// ***Key scheduling***
uint8 expkey[176];
ExpandKey (key, expkey);
cudaMemcpyToSymbol(const_expkey, expkey, 176*sizeof(uint8)); //Moving the expanding key to constant memory
cudaMemcpyToSymbol(const_IK0, IK0, 256*sizeof(uint32_t));
// ***Inputdata file to encrypt/decrypt***
//Checking for the size of the file
int filesize;
filesize = fsize(filename);
//CMS padding to have 16 bytes blocks of data
uint8 padElmt = 0;
uint8 streamPad = 0;
int mod16 = filesize%16;
if(mode){
padElmt = 16 - mod16; // We always add bytes for later padding detection
        mod16 = ((filesize+padElmt)/streamNum)%16; //padding for making each future chunk a multiple of 16
streamPad = streamNum*(16 - mod16);
}
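    // Two-stage padding: padElmt first rounds the plaintext up to whole
    // 16-byte AES blocks (always adding at least one byte so the pad length can
    // be recovered on decryption); streamPad then rounds it up again so that
    // each of the streamNum chunks handed to a stream is itself a multiple of 16.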
//Creating required arrays with page-locked memory
uint8 *hostInData;
checkCudaErrors(cudaHostAlloc((void**)&hostInData, (filesize+padElmt+streamPad)*sizeof(uint8), cudaHostAllocDefault));
//Opening the file
FILE * inputFile;
int result;
inputFile = fopen(filename,"rb");
if (inputFile == NULL) {
perror ("Error opening file");
exit(1);
}
result = fread (hostInData, sizeof(uint8), filesize, inputFile);
if(result != filesize) {
perror("Reading error from the input file");
exit(1);
}
fclose(inputFile);
//Padding
for (int i = 0; i < padElmt; i++) {
hostInData[filesize + i] = padElmt;
}
filesize += padElmt;
for (int i = 0; i < streamPad; i++) {
hostInData[filesize + i] = streamPad;
}
filesize += streamPad;
std::cout << " Data to treat with padding elements: " << filesize << " bytes." << std::endl;
//Determining grid size if not given
int size = filesize/streamNum; // To treat by each kernel launch
if(!blockNum) {
blockNum = 1+size/(threadNum*16);
}
else {
if(blockNum*threadNum* 16 < size) {
            std::cerr << std::endl << std::endl << "BlockNum and ThreadNum don't fit the data file to encrypt/decrypt. ";
exit(1);
}
}
    std::cout << " Grid size in terms of blocks: " << blockNum << std::endl;
//Streams creation
cudaStream_t stream[streamNum];
for (int i = 0; i < streamNum; ++i)
cudaStreamCreate(&stream[i]);
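    // Further down, chunk i is bound to stream[i]: its async H2D copy, kernel
    // launch and D2H copy are all issued on that stream, so transfers of one
    // chunk can overlap with computation on another (this is why the host
    // buffer was allocated as page-locked memory above).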
//Device vectors declarations and allocations
uint32_t * devInput, * devOutput, * dev_sm_te1, * dev_sm_te2, * dev_sm_te3, * dev_sm_te4;
uint8 * dev_sm_sbox;
cudaMalloc( (void **) &devInput , filesize*sizeof(uint8));
cudaMalloc( (void **) &devOutput , filesize*sizeof(uint8));
cudaMalloc( (void **) &dev_sm_te1 , 256*sizeof(uint32_t));
cudaMalloc( (void **) &dev_sm_te2 , 256*sizeof(uint32_t));
cudaMalloc( (void **) &dev_sm_te3 , 256*sizeof(uint32_t));
cudaMalloc( (void **) &dev_sm_te4 , 256*sizeof(uint32_t));
cudaMalloc( (void **) &dev_sm_sbox , 256*sizeof(uint8));
//GPU + memory transfers time
cudaEvent_t startHost, stopHost, delayHost;
checkCudaErrors(cudaEventCreate(&startHost));
checkCudaErrors(cudaEventCreate(&stopHost));
checkCudaErrors(cudaEventCreate(&delayHost));
//To record device time execution
cudaEvent_t startDevice, stopDevice;
checkCudaErrors(cudaEventCreate(&startDevice));
checkCudaErrors(cudaEventCreate(&stopDevice));
//Copy vectors from host memory to device memory
if(mode) {
cudaMemcpy(dev_sm_te1 , TBox0 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_te2 , TBox1 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_te3 , TBox2 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_te4 , TBox3 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_sbox , sbox , 256*sizeof(uint8 ), cudaMemcpyHostToDevice);
}
else {
cudaMemcpy(dev_sm_te1 , TBoxi0 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_te2 , TBoxi1 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_te3 , TBoxi2 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_te4 , TBoxi3 , 256*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_sm_sbox , sbox_inv , 256*sizeof(uint8 ), cudaMemcpyHostToDevice);
}
uint32_t *hostData = (uint32_t*)hostInData;
int word = size/4;
//Warm Up
cudaMemcpy(devInput, hostInData, filesize*sizeof(uint8_t), cudaMemcpyHostToDevice);
if(mode) {
for(int i=0; i < warm_up_device ; i++) {
encrypt_Kernel<<<blockNum,threadNum>>>(devInput, devOutput, filesize, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
}
}
else {
for(int i=0; i < warm_up_device ; i++) {
decrypt_Kernel<<<blockNum,threadNum>>>(devInput, devOutput, filesize, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
}
}
#ifdef BENCH_ON
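    //Benchmark mode: repeat the full streamed H2D copy / kernel / D2H copy pipeline 1000 times; the elapsed time is averaged below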
if(mode) {
printf("\nBENCH_ON\n");
checkCudaErrors(cudaEventRecord(startDevice, NULL));
for(int j=0; j<1000; j++){
for(int i=0; i < streamNum ; ++i) {
cudaMemcpyAsync(devInput+i*word, hostData+i*word, size, cudaMemcpyHostToDevice, stream[i]);
encrypt_Kernel<<<blockNum,threadNum, 0, stream[i]>>>(devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
cudaMemcpyAsync(hostData+i*word, devOutput+i*word, size, cudaMemcpyDeviceToHost, stream[i]);
}
}
checkCudaErrors(cudaEventRecord(stopDevice, NULL));
}
else {
checkCudaErrors(cudaEventRecord(startDevice, NULL));
for(int j=0; j<1000; j++){
for(int i=0; i < streamNum ; ++i) {
cudaMemcpyAsync(devInput+i*word, hostData+i*word, size, cudaMemcpyHostToDevice, stream[i]);
decrypt_Kernel<<<blockNum,threadNum, 0, stream[i]>>>(devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
cudaMemcpyAsync(hostData+i*word, devOutput+i*word, size, cudaMemcpyDeviceToHost, stream[i]);
}
}
checkCudaErrors(cudaEventRecord(stopDevice, NULL));
}
#else
if(mode) {
checkCudaErrors(cudaEventRecord(startDevice, NULL));
for(int i=0; i < streamNum ; ++i) {
cudaMemcpyAsync(devInput+i*word, hostData+i*word, size, cudaMemcpyHostToDevice, stream[i]);
encrypt_Kernel<<<blockNum,threadNum, 0, stream[i]>>>(devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
cudaMemcpyAsync(hostData+i*word, devOutput+i*word, size, cudaMemcpyDeviceToHost, stream[i]);
}
checkCudaErrors(cudaEventRecord(stopDevice, NULL));
}
else {
checkCudaErrors(cudaEventRecord(startDevice, NULL));
for(int i=0; i < streamNum ; ++i) {
cudaMemcpyAsync(devInput+i*word, hostData+i*word, size, cudaMemcpyHostToDevice, stream[i]);
decrypt_Kernel<<<blockNum,threadNum, 0, stream[i]>>>(devInput+i*word, devOutput+i*word, size, dev_sm_te1,
dev_sm_te2, dev_sm_te3, dev_sm_te4, dev_sm_sbox);
cudaMemcpyAsync(hostData+i*word, devOutput+i*word, size, cudaMemcpyDeviceToHost, stream[i]);
}
checkCudaErrors(cudaEventRecord(stopDevice, NULL));
}
#endif
checkCudaErrors(cudaEventSynchronize(stopDevice));
checkCudaErrors(cudaDeviceSynchronize());
//Time calculation
float Devmsec = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&Devmsec, startDevice, stopDevice));
#ifdef BENCH_ON
Devmsec/= 1000;
#endif
double throughput = 1.0e-9f*8*filesize/(Devmsec*1.0e-3f);
printf("\n GPU processing time: %f (ms)", Devmsec);
printf("\n GPU throughput: %f (Gbps)\n", throughput);
//Writing results inside a file
FILE * outputFile;
outputFile = fopen("Result/result.dat","wb");
if (outputFile == NULL) {
perror ("Error opening file");
exit(1);
}
if(mode){
result = fwrite (hostInData, sizeof(uint8), filesize, outputFile);
if(result != filesize) {
perror("Writting error to the output file");
exit(1);
}
}
else {
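        //Decryption: the last byte holds the stream-padding length, and the byte just before that padding holds the block-padding length; both pad regions are dropped before writing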
uint8 padTotal = hostInData[filesize - 1];
padTotal += hostInData[filesize - padTotal - 1];
result = fwrite (hostInData, sizeof(uint8), filesize-padTotal, outputFile);
if(result != filesize-padTotal) {
perror("Writting error to the output file");
exit(1);
}
}
fclose(outputFile);
//free streams
for (int i = 0; i < streamNum; ++i)
cudaStreamDestroy(stream[i]);
// Free device memory
checkCudaErrors( cudaFreeHost(hostInData) );
cudaFree(devInput);
cudaFree(devOutput);
cudaFree(dev_sm_te1);
cudaFree(dev_sm_te2);
cudaFree(dev_sm_te3);
cudaFree(dev_sm_te4);
cudaFree(dev_sm_sbox);
return 0;
}
|
e399e317aaf281691ece7295f439e2875e4a0777.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wall_repulsion.h"
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/walls/simple_stationary_wall.h>
namespace mirheo
{
namespace channel_names
{
static const std::string sdf = "sdf";
static const std::string grad_sdf = "grad_sdf";
} // namespace channel_names
namespace wall_repulsion_plugin_kernels
{
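// Repulsion kernel: for every particle with sdf + h >= 0 (i.e. closer than h to the wall),
// apply a force of magnitude min(maxForce, C * (sdf + h)) directed along -grad(sdf).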
__global__ void forceFromSDF(PVview view, const real *sdfs, const real3 *gradients, real C, real h, real maxForce)
{
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
const real sdf = sdfs[pid];
if (sdf + h >= 0.0_r)
{
const real3 f = -gradients[pid] * math::min( maxForce, C * math::max(sdf + h, 0.0_r) );
atomicAdd(view.forces + pid, f);
}
}
} // wall_repulsion_plugin_kernels
WallRepulsionPlugin::WallRepulsionPlugin(const MirState *state, std::string name,
std::string pvName, std::string wallName,
real C, real h, real maxForce) :
SimulationPlugin(state, name),
pvName_(pvName),
wallName_(wallName),
C_(C),
h_(h),
maxForce_(maxForce)
{}
WallRepulsionPlugin::WallRepulsionPlugin(
const MirState *state, Loader&, const ConfigObject& config) :
WallRepulsionPlugin(state, config["name"], config["pvName"], config["wallName"],
config["C"], config["h"], config["maxForce"])
{}
void WallRepulsionPlugin::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
wall_ = dynamic_cast<SDFBasedWall*>(simulation->getWallByNameOrDie(wallName_));
pv_->requireDataPerParticle<real>(channel_names::sdf, DataManager::PersistenceMode::None);
pv_->requireDataPerParticle<real3>(channel_names::grad_sdf, DataManager::PersistenceMode::None);
if (wall_ == nullptr)
die("Wall repulsion plugin '%s' can only work with SDF-based walls, but got wall '%s'",
getCName(), wallName_.c_str());
}
// TODO: also compute this force on halo particles
// to get rid of the SDF wall margin
void WallRepulsionPlugin::beforeIntegration(hipStream_t stream)
{
PVview view(pv_, pv_->local());
auto sdfs = pv_->local()->dataPerParticle.getData<real>(channel_names::sdf);
auto gradients = pv_->local()->dataPerParticle.getData<real3>(channel_names::grad_sdf);
const real gradientThreshold = h_ + 0.1_r;
wall_->sdfPerParticle(pv_->local(), sdfs, gradients, gradientThreshold, stream);
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
wall_repulsion_plugin_kernels::forceFromSDF,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, sdfs->devPtr(), gradients->devPtr(), C_, h_, maxForce_ );
}
void WallRepulsionPlugin::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "WallRepulsionPlugin"));
}
ConfigObject WallRepulsionPlugin::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = SimulationPlugin::_saveSnapshot(saver, typeName);
config.emplace("pvName", saver(pvName_));
config.emplace("wallName", saver(wallName_));
config.emplace("C", saver(C_));
config.emplace("h", saver(h_));
config.emplace("maxForce", saver(maxForce_));
return config;
}
} // namespace mirheo
| e399e317aaf281691ece7295f439e2875e4a0777.cu | #include "wall_repulsion.h"
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/walls/simple_stationary_wall.h>
namespace mirheo
{
namespace channel_names
{
static const std::string sdf = "sdf";
static const std::string grad_sdf = "grad_sdf";
} // namespace channel_names
namespace wall_repulsion_plugin_kernels
{
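// Repulsion kernel: for every particle with sdf + h >= 0 (i.e. closer than h to the wall),
// apply a force of magnitude min(maxForce, C * (sdf + h)) directed along -grad(sdf).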
__global__ void forceFromSDF(PVview view, const real *sdfs, const real3 *gradients, real C, real h, real maxForce)
{
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
const real sdf = sdfs[pid];
if (sdf + h >= 0.0_r)
{
const real3 f = -gradients[pid] * math::min( maxForce, C * math::max(sdf + h, 0.0_r) );
atomicAdd(view.forces + pid, f);
}
}
} // wall_repulsion_plugin_kernels
WallRepulsionPlugin::WallRepulsionPlugin(const MirState *state, std::string name,
std::string pvName, std::string wallName,
real C, real h, real maxForce) :
SimulationPlugin(state, name),
pvName_(pvName),
wallName_(wallName),
C_(C),
h_(h),
maxForce_(maxForce)
{}
WallRepulsionPlugin::WallRepulsionPlugin(
const MirState *state, Loader&, const ConfigObject& config) :
WallRepulsionPlugin(state, config["name"], config["pvName"], config["wallName"],
config["C"], config["h"], config["maxForce"])
{}
void WallRepulsionPlugin::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
wall_ = dynamic_cast<SDFBasedWall*>(simulation->getWallByNameOrDie(wallName_));
pv_->requireDataPerParticle<real>(channel_names::sdf, DataManager::PersistenceMode::None);
pv_->requireDataPerParticle<real3>(channel_names::grad_sdf, DataManager::PersistenceMode::None);
if (wall_ == nullptr)
die("Wall repulsion plugin '%s' can only work with SDF-based walls, but got wall '%s'",
getCName(), wallName_.c_str());
}
// TODO: also compute this force on halo particles
// to get rid of the SDF wall margin
void WallRepulsionPlugin::beforeIntegration(cudaStream_t stream)
{
PVview view(pv_, pv_->local());
auto sdfs = pv_->local()->dataPerParticle.getData<real>(channel_names::sdf);
auto gradients = pv_->local()->dataPerParticle.getData<real3>(channel_names::grad_sdf);
const real gradientThreshold = h_ + 0.1_r;
wall_->sdfPerParticle(pv_->local(), sdfs, gradients, gradientThreshold, stream);
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
wall_repulsion_plugin_kernels::forceFromSDF,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, sdfs->devPtr(), gradients->devPtr(), C_, h_, maxForce_ );
}
void WallRepulsionPlugin::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "WallRepulsionPlugin"));
}
ConfigObject WallRepulsionPlugin::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = SimulationPlugin::_saveSnapshot(saver, typeName);
config.emplace("pvName", saver(pvName_));
config.emplace("wallName", saver(wallName_));
config.emplace("C", saver(C_));
config.emplace("h", saver(h_));
config.emplace("maxForce", saver(maxForce_));
return config;
}
} // namespace mirheo
|
10f70db1988fb257f605dab9f3a893915f2c9df8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/cum_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
using Tensor = paddle::framework::Tensor;
using LoDTensor = paddle::framework::LoDTensor;
namespace paddle {
namespace operators {
template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(const T* idata, T* odata, int src_base,
int dst_base, int valid_item) {
__shared__ T sh_mem[BLOCK_SIZE];
int tx = threadIdx.x;
int offset = tx;
int in_index = src_base + offset;
if (offset >= valid_item) {
sh_mem[offset] = 0;
} else {
int sh_mem_index = BLOCK_SIZE - offset - 1;
T data = idata[in_index];
sh_mem[sh_mem_index] = data;
}
__syncthreads();
int out_index = dst_base - offset;
if (offset < valid_item) {
int sh_mem_index = BLOCK_SIZE - offset - 1;
odata[out_index] = sh_mem[sh_mem_index];
}
}
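// Reverses each row of length reverse_size along the scan axis; each block handles one
// (outer, inner) row and processes it in 1024-element tiles via BlockReverse.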
template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data, T* reverse_data,
int reverse_size, int outer_size,
int inner_size) {
int bx = blockIdx.x;
int by = blockIdx.y;
int item_per_block = 1024;
for (int block_offset = 0; block_offset < reverse_size;
block_offset += item_per_block) {
int valid_item = (reverse_size - block_offset > item_per_block)
? item_per_block
: reverse_size - block_offset;
int src_offset =
bx * reverse_size + block_offset + by * (inner_size * reverse_size);
int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
reverse_size - 1 - block_offset;
if (reverse_size < item_per_block) {
valid_item = reverse_size;
}
BlockReverse<T, 1024>(matrix_data, reverse_data, src_offset, dst_offset,
valid_item);
}
}
template <typename T>
struct BlockPrefixCallbackOp {
// Running prefix
T running_total;
// Constructor
__device__ BlockPrefixCallbackOp(T running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ T operator()(T block_aggregate) {
T old_prefix = running_total;
running_total = old_prefix + block_aggregate;
return old_prefix;
}
};
// No bank-conflict transpose
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata, const T* idata, size_t height,
size_t width) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < width && (y + j) < height) {
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
} else {
tile[threadIdx.y + j][threadIdx.x] = 0;
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < height && (y + j) < width) {
odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
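// Tiled block-wide prefix sum: each block scans one scan_size segment in tiles of
// BLOCK_THREADS * ITEMS_PER_THREAD elements, carrying the running total across tiles
// through BlockPrefixCallbackOp; supports both inclusive and exclusive scans.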
template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockScanKernel(T* d_out, const T* d_in, int inner_size,
int outer_size, int scan_size, bool exclusive) {
  // Specialize BlockLoad, BlockStore, and BlockScan collective types
typedef cub::BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD,
cub::BLOCK_LOAD_TRANSPOSE>
BlockLoadT;
typedef cub::BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD,
cub::BLOCK_STORE_TRANSPOSE>
BlockStoreT;
typedef hipcub::BlockScan<T, BLOCK_THREADS> BlockScanT;
// Allocate type-safe, repurposable shared memory for collectives
__shared__ union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
int bx = blockIdx.x;
int by = blockIdx.y;
BlockPrefixCallbackOp<T> prefix_op(0);
T block_aggregate = static_cast<T>(0);
// Obtain this block's segment of consecutive keys (blocked across threads)
int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
for (int block_offset = 0; block_offset < scan_size;
block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
int valid_item = (scan_size - block_offset > item_per_block)
? item_per_block
: (scan_size - block_offset);
if (scan_size < item_per_block) {
valid_item = scan_size;
}
int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);
T thread_keys[ITEMS_PER_THREAD];
BlockLoadT(temp_storage.load)
.Load(d_in + offset, thread_keys, valid_item, 0);
__syncthreads();
if (exclusive) {
T init_value = static_cast<T>(0);
BlockScanT(temp_storage.scan)
.ExclusiveScan(thread_keys, thread_keys, hipcub::Sum(), prefix_op);
} else {
BlockScanT(temp_storage.scan)
.InclusiveScan(thread_keys, thread_keys, hipcub::Sum(), prefix_op);
}
__syncthreads();
BlockStoreT(temp_storage.store)
.Store(d_out + offset, thread_keys, valid_item);
}
}
template <typename DeviceContext, typename T>
class CumCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<framework::Tensor>("X");
auto* out = context.Output<framework::Tensor>("Out");
int axis = context.Attr<int>("axis");
bool exclusive = context.Attr<bool>("exclusive");
bool reverse = context.Attr<bool>("reverse");
auto out_dims = out->dims();
auto size = in->numel();
PADDLE_ENFORCE_EQ(
axis < out_dims.size() && axis >= (0 - out_dims.size()), true,
platform::errors::OutOfRange(
"Attr(axis) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(axis) = %d.",
out_dims.size(), out_dims.size() - 1, axis));
if (axis < 0) {
axis += out_dims.size();
}
T* out_data = out->mutable_data<T>(context.GetPlace());
const T* in_data = in->data<T>();
// Use thrust for parallel acceleration when the input size is equal to the
// length of the axis dimension.
if (size == out_dims[axis]) {
if (reverse) {
thrust::device_ptr<const T> dev_ptr =
thrust::device_pointer_cast(in_data);
thrust::device_vector<T> vec(dev_ptr, dev_ptr + size);
if (exclusive) {
thrust::exclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
out_data);
} else {
thrust::inclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
out_data);
}
thrust::reverse(thrust::device, out_data, out_data + size);
} else {
if (exclusive) {
thrust::exclusive_scan(thrust::device, in_data, in_data + size,
out_data);
} else {
thrust::inclusive_scan(thrust::device, in_data, in_data + size,
out_data);
}
}
return;
}
size_t height = 1;
size_t width = 1;
for (size_t i = 0; i <= axis; i++) {
height *= out_dims[i];
}
for (size_t i = axis + 1; i < out_dims.size(); i++) {
width *= out_dims[i];
}
int scan_size = out_dims[axis];
bool transpose = (axis != out_dims.size() - 1);
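    // If the scan axis is not the innermost dimension, transpose so the scanned axis
    // becomes contiguous, run the scan, and transpose back at the end.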
int tile_size = 32;
dim3 blocks(32, 8);
dim3 transpose_grids((width + tile_size - 1) / tile_size,
(height + tile_size - 1) / tile_size);
auto& dev_ctx = context.template device_context<DeviceContext>();
framework::Tensor tmp;
tmp.Resize(out_dims);
auto* tmp_data = tmp.mutable_data<T>(context.GetPlace());
T* next_in_data = out_data;
T* next_out_data = tmp_data;
if (transpose) {
hipLaunchKernelGGL(( MatrixTranspose<T, 32,
8>), dim3(transpose_grids), dim3(blocks), 0, dev_ctx.stream(),
out_data, in_data, height, width);
next_in_data = out_data;
next_out_data = tmp_data;
}
auto swap_ptr = [](T*& ptr1, T*& ptr2) {
T* tmp = ptr2;
ptr2 = ptr1;
ptr1 = tmp;
};
int outer_size = height / scan_size;
int inner_size = width;
// Consider the size of shared memory, here block size is 128
dim3 scan_grid(outer_size, inner_size);
dim3 reverse_grid = scan_grid;
if (reverse) {
if (transpose) {
reverse_grid.x = scan_grid.y;
reverse_grid.y = scan_grid.x;
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
next_in_data, next_out_data, scan_size, outer_size, inner_size);
if (!transpose) next_in_data = tmp_data;
swap_ptr(next_in_data, next_out_data);
} else {
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
in_data, out_data, scan_size, outer_size, inner_size);
}
}
if (!transpose && !reverse) {
hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4>), dim3(scan_grid), dim3(128), 0, dev_ctx.stream(),
out_data, in_data, outer_size, inner_size, scan_size, exclusive);
} else {
hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4>), dim3(scan_grid), dim3(128), 0, dev_ctx.stream(),
next_out_data, next_in_data, outer_size, inner_size, scan_size,
exclusive);
}
swap_ptr(next_in_data, next_out_data);
if (reverse) {
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
}
if (transpose) {
transpose_grids.x = (height + tile_size - 1) / tile_size;
transpose_grids.y = (width + tile_size - 1) / tile_size;
hipLaunchKernelGGL(( MatrixTranspose<T, 32,
8>), dim3(transpose_grids), dim3(blocks), 0, dev_ctx.stream(),
next_out_data, next_in_data, width, height);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
cumsum, ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
| 10f70db1988fb257f605dab9f3a893915f2c9df8.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/cum_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
using Tensor = paddle::framework::Tensor;
using LoDTensor = paddle::framework::LoDTensor;
namespace paddle {
namespace operators {
template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(const T* idata, T* odata, int src_base,
int dst_base, int valid_item) {
__shared__ T sh_mem[BLOCK_SIZE];
int tx = threadIdx.x;
int offset = tx;
int in_index = src_base + offset;
if (offset >= valid_item) {
sh_mem[offset] = 0;
} else {
int sh_mem_index = BLOCK_SIZE - offset - 1;
T data = idata[in_index];
sh_mem[sh_mem_index] = data;
}
__syncthreads();
int out_index = dst_base - offset;
if (offset < valid_item) {
int sh_mem_index = BLOCK_SIZE - offset - 1;
odata[out_index] = sh_mem[sh_mem_index];
}
}
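// Reverses each row of length reverse_size along the scan axis; each block handles one
// (outer, inner) row and processes it in 1024-element tiles via BlockReverse.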
template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data, T* reverse_data,
int reverse_size, int outer_size,
int inner_size) {
int bx = blockIdx.x;
int by = blockIdx.y;
int item_per_block = 1024;
for (int block_offset = 0; block_offset < reverse_size;
block_offset += item_per_block) {
int valid_item = (reverse_size - block_offset > item_per_block)
? item_per_block
: reverse_size - block_offset;
int src_offset =
bx * reverse_size + block_offset + by * (inner_size * reverse_size);
int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
reverse_size - 1 - block_offset;
if (reverse_size < item_per_block) {
valid_item = reverse_size;
}
BlockReverse<T, 1024>(matrix_data, reverse_data, src_offset, dst_offset,
valid_item);
}
}
template <typename T>
struct BlockPrefixCallbackOp {
// Running prefix
T running_total;
// Constructor
__device__ BlockPrefixCallbackOp(T running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ T operator()(T block_aggregate) {
T old_prefix = running_total;
running_total = old_prefix + block_aggregate;
return old_prefix;
}
};
// No bank-conflict transpose
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata, const T* idata, size_t height,
size_t width) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < width && (y + j) < height) {
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
} else {
tile[threadIdx.y + j][threadIdx.x] = 0;
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < height && (y + j) < width) {
odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
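// Tiled block-wide prefix sum: each block scans one scan_size segment in tiles of
// BLOCK_THREADS * ITEMS_PER_THREAD elements, carrying the running total across tiles
// through BlockPrefixCallbackOp; supports both inclusive and exclusive scans.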
template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockScanKernel(T* d_out, const T* d_in, int inner_size,
int outer_size, int scan_size, bool exclusive) {
  // Specialize BlockLoad, BlockStore, and BlockScan collective types
typedef cub::BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD,
cub::BLOCK_LOAD_TRANSPOSE>
BlockLoadT;
typedef cub::BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD,
cub::BLOCK_STORE_TRANSPOSE>
BlockStoreT;
typedef cub::BlockScan<T, BLOCK_THREADS> BlockScanT;
// Allocate type-safe, repurposable shared memory for collectives
__shared__ union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
int bx = blockIdx.x;
int by = blockIdx.y;
BlockPrefixCallbackOp<T> prefix_op(0);
T block_aggregate = static_cast<T>(0);
// Obtain this block's segment of consecutive keys (blocked across threads)
int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
for (int block_offset = 0; block_offset < scan_size;
block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
int valid_item = (scan_size - block_offset > item_per_block)
? item_per_block
: (scan_size - block_offset);
if (scan_size < item_per_block) {
valid_item = scan_size;
}
int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);
T thread_keys[ITEMS_PER_THREAD];
BlockLoadT(temp_storage.load)
.Load(d_in + offset, thread_keys, valid_item, 0);
__syncthreads();
if (exclusive) {
T init_value = static_cast<T>(0);
BlockScanT(temp_storage.scan)
.ExclusiveScan(thread_keys, thread_keys, cub::Sum(), prefix_op);
} else {
BlockScanT(temp_storage.scan)
.InclusiveScan(thread_keys, thread_keys, cub::Sum(), prefix_op);
}
__syncthreads();
BlockStoreT(temp_storage.store)
.Store(d_out + offset, thread_keys, valid_item);
}
}
template <typename DeviceContext, typename T>
class CumCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<framework::Tensor>("X");
auto* out = context.Output<framework::Tensor>("Out");
int axis = context.Attr<int>("axis");
bool exclusive = context.Attr<bool>("exclusive");
bool reverse = context.Attr<bool>("reverse");
auto out_dims = out->dims();
auto size = in->numel();
PADDLE_ENFORCE_EQ(
axis < out_dims.size() && axis >= (0 - out_dims.size()), true,
platform::errors::OutOfRange(
"Attr(axis) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(axis) = %d.",
out_dims.size(), out_dims.size() - 1, axis));
if (axis < 0) {
axis += out_dims.size();
}
T* out_data = out->mutable_data<T>(context.GetPlace());
const T* in_data = in->data<T>();
// Use thrust for parallel acceleration when the input size is equal to the
// length of the ‘axis’ dimension.
if (size == out_dims[axis]) {
if (reverse) {
thrust::device_ptr<const T> dev_ptr =
thrust::device_pointer_cast(in_data);
thrust::device_vector<T> vec(dev_ptr, dev_ptr + size);
if (exclusive) {
thrust::exclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
out_data);
} else {
thrust::inclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
out_data);
}
thrust::reverse(thrust::device, out_data, out_data + size);
} else {
if (exclusive) {
thrust::exclusive_scan(thrust::device, in_data, in_data + size,
out_data);
} else {
thrust::inclusive_scan(thrust::device, in_data, in_data + size,
out_data);
}
}
return;
}
size_t height = 1;
size_t width = 1;
for (size_t i = 0; i <= axis; i++) {
height *= out_dims[i];
}
for (size_t i = axis + 1; i < out_dims.size(); i++) {
width *= out_dims[i];
}
int scan_size = out_dims[axis];
bool transpose = (axis != out_dims.size() - 1);
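    // If the scan axis is not the innermost dimension, transpose so the scanned axis
    // becomes contiguous, run the scan, and transpose back at the end.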
int tile_size = 32;
dim3 blocks(32, 8);
dim3 transpose_grids((width + tile_size - 1) / tile_size,
(height + tile_size - 1) / tile_size);
auto& dev_ctx = context.template device_context<DeviceContext>();
framework::Tensor tmp;
tmp.Resize(out_dims);
auto* tmp_data = tmp.mutable_data<T>(context.GetPlace());
T* next_in_data = out_data;
T* next_out_data = tmp_data;
if (transpose) {
MatrixTranspose<T, 32,
8><<<transpose_grids, blocks, 0, dev_ctx.stream()>>>(
out_data, in_data, height, width);
next_in_data = out_data;
next_out_data = tmp_data;
}
auto swap_ptr = [](T*& ptr1, T*& ptr2) {
T* tmp = ptr2;
ptr2 = ptr1;
ptr1 = tmp;
};
int outer_size = height / scan_size;
int inner_size = width;
// Consider the size of shared memory, here block size is 128
dim3 scan_grid(outer_size, inner_size);
dim3 reverse_grid = scan_grid;
if (reverse) {
if (transpose) {
reverse_grid.x = scan_grid.y;
reverse_grid.y = scan_grid.x;
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
next_in_data, next_out_data, scan_size, outer_size, inner_size);
if (!transpose) next_in_data = tmp_data;
swap_ptr(next_in_data, next_out_data);
} else {
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
in_data, out_data, scan_size, outer_size, inner_size);
}
}
if (!transpose && !reverse) {
BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
out_data, in_data, outer_size, inner_size, scan_size, exclusive);
} else {
BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
next_out_data, next_in_data, outer_size, inner_size, scan_size,
exclusive);
}
swap_ptr(next_in_data, next_out_data);
if (reverse) {
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
}
if (transpose) {
transpose_grids.x = (height + tile_size - 1) / tile_size;
transpose_grids.y = (width + tile_size - 1) / tile_size;
MatrixTranspose<T, 32,
8><<<transpose_grids, blocks, 0, dev_ctx.stream()>>>(
next_out_data, next_in_data, width, height);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
cumsum, ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
|
dfa3f51130fdd43df35878825e97054c08933934.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "testlayers.h"
#include <array/ExtraArguments.h>
#include <array>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace sd;
class LambdaTests : public testing::Test {
public:
LambdaTests() {
printf("\n");
fflush(stdout);
}
};
template <typename Lambda>
__global__ void runLambda(double *input, double *output, Nd4jLong length, Lambda lambda) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < length; e += gridDim.x * blockDim.x) {
output[e] = lambda(input[e]);
}
}
void launcher(hipStream_t *stream, double *input, double *output, Nd4jLong length) {
//auto f = [] __host__ __device__ (double x) -> double {
// return x + 1.;
//};
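    // LAMBDA_D stands for a __host__ __device__ lambda taking a double, i.e. the
    // commented-out form shown above.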
auto f = LAMBDA_D(x) {
return x+1.;
};
hipLaunchKernelGGL(( runLambda), dim3(128), dim3(128), 128, *stream, input, output, length, f);
}
// TEST_F(LambdaTests, test_basic_1) {
// auto x = NDArrayFactory::create<double>('c', {5});
// auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// //x.applyLambda<double>(f, nullptr);
// launcher(LaunchContext::defaultContext()->getCudaStream(), (double *)x.specialBuffer(), (double *)x.specialBuffer(), x.lengthOf());
// auto res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
// ASSERT_EQ(0, res);
// ASSERT_EQ(e, x);
// }
// void test(NDArray &x) {
// auto f = LAMBDA_D(x) {
// return x+1.;
// };
// x.applyLambda(f, x);
// }
// template <typename T>
// void test2(NDArray &x) {
// auto f = LAMBDA_T(x) {
// return x+1.;
// };
// x.applyLambda(f, x);
// }
// void testPairwise(NDArray &x, NDArray &y) {
// auto f = LAMBDA_DD(x, y) {
// return x + y +1.;
// };
// x.applyPairwiseLambda(y, f, x);
// }
// void testTriplewise(NDArray &i, NDArray &j, NDArray &k) {
// auto f = LAMBDA_DDD(i, j, k) {
// return i + j + k + 2.;
// };
// i.applyTriplewiseLambda(j, k, f, i);
// }
// void testIndexed(NDArray &x) {
// auto f = ILAMBDA_D(x) {
// return _idx + 1.;
// };
// x.applyIndexedLambda(f, x);
// }
// void testIndexedPairwise(NDArray &x, NDArray &y) {
// auto f = ILAMBDA_DD(x, y) {
// return _idx + x + y +1.;
// };
// x.applyIndexedPairwiseLambda(y, f, x);
// }
// TEST_F(LambdaTests, test_basic_2) {
// auto x = NDArrayFactory::create<double>('c', {5});
// auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// test(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_3) {
// auto x = NDArrayFactory::create<float>('c', {5});
// auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
// test(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_4) {
// auto x = NDArrayFactory::create<float>('c', {5});
// auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
// test2<float>(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_5) {
// auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.});
// auto e = NDArrayFactory::create<double>('c', {5}, {4., 4., 4., 4., 4.});
// testPairwise(x, y);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_6) {
// auto x = NDArrayFactory::create<double>('c', {5});
// auto e = NDArrayFactory::create<double>('c', {5}, {1., 2., 3., 4., 5.});
// testIndexed(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_7) {
// auto w = NDArrayFactory::create<double>('c', {5}, {0., 0., 0., 0., 0.});
// auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.});
// auto e = NDArrayFactory::create<double>('c', {5}, {5., 5., 5., 5., 5.});
// testTriplewise(w, x, y);
// ASSERT_EQ(e, w);
// }
// TEST_F(LambdaTests, test_basic_8) {
// auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.});
// auto e = NDArrayFactory::create<double>('c', {5}, {4., 5., 6., 7., 8.});
// testIndexedPairwise(x, y);
// ASSERT_EQ(e, x);
// }
// template <typename T>
// void testPairwiseMy(NDArray &x, NDArray &y, NDArray &z) {
// auto f = LAMBDA_TT(x, y){
// return sd::math::nd4j_max<T>(x, (T)0.f)
// - x * y
// + sd::math::nd4j_log<T,T>((T)1.f
// + sd::math::nd4j_exp<T,T>(-sd::math::nd4j_abs(x)));
// };
// x.applyPairwiseLambda(y, f, z);
// }
// ///////////////////////////////////////////////////////////////////
// TEST_F(LambdaTests, test_basic_9) {
// NDArray labels('c', {2,3,4},{0,1,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,1,1,0,1,0});
// NDArray logits('c', {2,3,4}, sd::DataType::DOUBLE);
// NDArray output('c', {2,3,4}, sd::DataType::DOUBLE);
// NDArray expected('c', {2,3,4}, {0.744397, 0.598139, 0.554355, 0.913015, 0.474077, 1.037488, 0.403186, 1.171101, 0.341154, 1.313262, 0.287335, 1.463282, 0.241008, 1.620417, 0.201413, 1.783901, 0.167786, 1.952978, 2.039387, 0.126928, 0.115520, 2.305083, 0.095545, 2.486836});
// logits.linspace(0.1, 0.1);
// NDArray::prepareSpecialUse({&output}, {&logits, &labels});
// testPairwiseMy<double>(logits, labels, output);
// NDArray::registerSpecialUse({&output}, {&logits, &labels});
// // output.printBuffer(nullptr, -1, true);
// ASSERT_TRUE(expected.equalsTo(output));
// }
| dfa3f51130fdd43df35878825e97054c08933934.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "testlayers.h"
#include <array/ExtraArguments.h>
#include <array>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace sd;
class LambdaTests : public testing::Test {
public:
LambdaTests() {
printf("\n");
fflush(stdout);
}
};
template <typename Lambda>
__global__ void runLambda(double *input, double *output, Nd4jLong length, Lambda lambda) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < length; e += gridDim.x * blockDim.x) {
output[e] = lambda(input[e]);
}
}
void launcher(cudaStream_t *stream, double *input, double *output, Nd4jLong length) {
//auto f = [] __host__ __device__ (double x) -> double {
// return x + 1.;
//};
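    // LAMBDA_D stands for a __host__ __device__ lambda taking a double, i.e. the
    // commented-out form shown above.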
auto f = LAMBDA_D(x) {
return x+1.;
};
runLambda<<<128, 128, 128, *stream>>>(input, output, length, f);
}
// TEST_F(LambdaTests, test_basic_1) {
// auto x = NDArrayFactory::create<double>('c', {5});
// auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// //x.applyLambda<double>(f, nullptr);
// launcher(LaunchContext::defaultContext()->getCudaStream(), (double *)x.specialBuffer(), (double *)x.specialBuffer(), x.lengthOf());
// auto res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
// ASSERT_EQ(0, res);
// ASSERT_EQ(e, x);
// }
// void test(NDArray &x) {
// auto f = LAMBDA_D(x) {
// return x+1.;
// };
// x.applyLambda(f, x);
// }
// template <typename T>
// void test2(NDArray &x) {
// auto f = LAMBDA_T(x) {
// return x+1.;
// };
// x.applyLambda(f, x);
// }
// void testPairwise(NDArray &x, NDArray &y) {
// auto f = LAMBDA_DD(x, y) {
// return x + y +1.;
// };
// x.applyPairwiseLambda(y, f, x);
// }
// void testTriplewise(NDArray &i, NDArray &j, NDArray &k) {
// auto f = LAMBDA_DDD(i, j, k) {
// return i + j + k + 2.;
// };
// i.applyTriplewiseLambda(j, k, f, i);
// }
// void testIndexed(NDArray &x) {
// auto f = ILAMBDA_D(x) {
// return _idx + 1.;
// };
// x.applyIndexedLambda(f, x);
// }
// void testIndexedPairwise(NDArray &x, NDArray &y) {
// auto f = ILAMBDA_DD(x, y) {
// return _idx + x + y +1.;
// };
// x.applyIndexedPairwiseLambda(y, f, x);
// }
// TEST_F(LambdaTests, test_basic_2) {
// auto x = NDArrayFactory::create<double>('c', {5});
// auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// test(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_3) {
// auto x = NDArrayFactory::create<float>('c', {5});
// auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
// test(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_4) {
// auto x = NDArrayFactory::create<float>('c', {5});
// auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
// test2<float>(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_5) {
// auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.});
// auto e = NDArrayFactory::create<double>('c', {5}, {4., 4., 4., 4., 4.});
// testPairwise(x, y);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_6) {
// auto x = NDArrayFactory::create<double>('c', {5});
// auto e = NDArrayFactory::create<double>('c', {5}, {1., 2., 3., 4., 5.});
// testIndexed(x);
// ASSERT_EQ(e, x);
// }
// TEST_F(LambdaTests, test_basic_7) {
// auto w = NDArrayFactory::create<double>('c', {5}, {0., 0., 0., 0., 0.});
// auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.});
// auto e = NDArrayFactory::create<double>('c', {5}, {5., 5., 5., 5., 5.});
// testTriplewise(w, x, y);
// ASSERT_EQ(e, w);
// }
// TEST_F(LambdaTests, test_basic_8) {
// auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.});
// auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.});
// auto e = NDArrayFactory::create<double>('c', {5}, {4., 5., 6., 7., 8.});
// testIndexedPairwise(x, y);
// ASSERT_EQ(e, x);
// }
// template <typename T>
// void testPairwiseMy(NDArray &x, NDArray &y, NDArray &z) {
// auto f = LAMBDA_TT(x, y){
// return sd::math::nd4j_max<T>(x, (T)0.f)
// - x * y
// + sd::math::nd4j_log<T,T>((T)1.f
// + sd::math::nd4j_exp<T,T>(-sd::math::nd4j_abs(x)));
// };
// x.applyPairwiseLambda(y, f, z);
// }
// ///////////////////////////////////////////////////////////////////
// TEST_F(LambdaTests, test_basic_9) {
// NDArray labels('c', {2,3,4},{0,1,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,1,1,0,1,0});
// NDArray logits('c', {2,3,4}, sd::DataType::DOUBLE);
// NDArray output('c', {2,3,4}, sd::DataType::DOUBLE);
// NDArray expected('c', {2,3,4}, {0.744397, 0.598139, 0.554355, 0.913015, 0.474077, 1.037488, 0.403186, 1.171101, 0.341154, 1.313262, 0.287335, 1.463282, 0.241008, 1.620417, 0.201413, 1.783901, 0.167786, 1.952978, 2.039387, 0.126928, 0.115520, 2.305083, 0.095545, 2.486836});
// logits.linspace(0.1, 0.1);
// NDArray::prepareSpecialUse({&output}, {&logits, &labels});
// testPairwiseMy<double>(logits, labels, output);
// NDArray::registerSpecialUse({&output}, {&logits, &labels});
// // output.printBuffer(nullptr, -1, true);
// ASSERT_TRUE(expected.equalsTo(output));
// }
|
5f7674eac0c000fb815f152f66b91589a6524349.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "sha1_.cu"
#include <string.h>
#define N 8
#define SPACE 10000
#define BLOCK_SIZE 16
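// Brute-force kernel: thread (x, y) builds one candidate 8-digit ASCII string
// (the first four digits from x, the last four from y), computes its SHA-1,
// and writes the plaintext back when the digest matches the target.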
__global__ void kernel(unsigned char* digest, unsigned char* find, bool* bingo) {
// keep the context in shared memory
__shared__ unsigned char ctx[16][16][N];
// keep the digest in the shared memory, too
//__shared__ unsigned char target[20];
// the digest we calculate
__shared__ unsigned char result[16][16][20];
    // 0000 - 9999
int high = blockIdx.x * blockDim.x + threadIdx.x; if (high >= 10000) return;
    // 0000 - 9999
int low = blockIdx.y * blockDim.y + threadIdx.y; if (low >= 10000) return;
/*
// only one thread of a block has the responsibility to dump the digest
if (threadIdx.x==0 && threadIdx.y==0) {
for (int i=0; i<20; i++)
target[i] = digest[i];
}
__syncthreads(); // !!
*/
// generate and assign context
ctx[threadIdx.x][threadIdx.y][0] = (unsigned char)(high /1000 + 48);
ctx[threadIdx.x][threadIdx.y][1] = (unsigned char)((high % 1000) / 100 + 48);
ctx[threadIdx.x][threadIdx.y][2] = (unsigned char)((high % 100) / 10 + 48);
ctx[threadIdx.x][threadIdx.y][3] = (unsigned char)(high % 10 + 48);
ctx[threadIdx.x][threadIdx.y][4] = (unsigned char)(low/1000 + 48);
ctx[threadIdx.x][threadIdx.y][5] = (unsigned char)((low % 1000) / 100 + 48);
ctx[threadIdx.x][threadIdx.y][6] = (unsigned char)((low % 100) / 10 + 48);
ctx[threadIdx.x][threadIdx.y][7] = (unsigned char)((low % 10 + 48));
// sha1
sha1(result[threadIdx.x][threadIdx.y], ctx[threadIdx.x][threadIdx.y], N);
// compare the result to the digest
int flag = 1;
for (int i=0; i<20; i++) {
if (result[threadIdx.x][threadIdx.y][i] != digest[i]) {
flag = 0;
break;
}
}
//find !!
if (flag==1) {
find[0] = ctx[threadIdx.x][threadIdx.y][0];
find[1] = ctx[threadIdx.x][threadIdx.y][1];
find[2] = ctx[threadIdx.x][threadIdx.y][2];
find[3] = ctx[threadIdx.x][threadIdx.y][3];
find[4] = ctx[threadIdx.x][threadIdx.y][4];
find[5] = ctx[threadIdx.x][threadIdx.y][5];
find[6] = ctx[threadIdx.x][threadIdx.y][6];
find[7] = ctx[threadIdx.x][threadIdx.y][7];
*bingo = true;
}
}
int main(int argc, char** argv) {
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
    if (argc < 2) {
printf("wrong arguments\n");
return -1;
}
// readin
char* input = argv[1];
unsigned char cypher[20];
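    // Convert the 40-character hex digest in argv[1] into 20 raw bytes.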
for (int i=0; i<20;i++) {
unsigned char high = input[2*i];
unsigned char low = input[2*i+1];
unsigned char combine;
switch(high) {
case '1': combine = 0x10; break;
case '2': combine = 0x20; break;
case '3': combine = 0x30; break;
case '4': combine = 0x40; break;
case '5': combine = 0x50; break;
case '6': combine = 0x60; break;
case '7': combine = 0x70; break;
case '8': combine = 0x80; break;
case '9': combine = 0x90; break;
case 'a': combine = 0xa0; break;
case 'b': combine = 0xb0; break;
case 'c': combine = 0xc0; break;
case 'd': combine = 0xd0; break;
case 'e': combine = 0xe0; break;
case 'f': combine = 0xf0; break;
default: combine = 0x00;
}
switch(low) {
case '1': combine |= 0x01; break;
case '2': combine |= 0x02; break;
case '3': combine |= 0x03; break;
case '4': combine |= 0x04; break;
case '5': combine |= 0x05; break;
case '6': combine |= 0x06; break;
case '7': combine |= 0x07; break;
case '8': combine |= 0x08; break;
case '9': combine |= 0x09; break;
case 'a': combine |= 0x0a; break;
case 'b': combine |= 0x0b; break;
case 'c': combine |= 0x0c; break;
case 'd': combine |= 0x0d; break;
case 'e': combine |= 0x0e; break;
case 'f': combine |= 0x0f; break;
default: combine |= 0x00;
}
cypher[i] = combine;
}
printf("\ncypher:");
for (int i=0; i<20; i++) {
printf("%x", cypher[i]);
}
printf("\n");
// cypher has been prepared
unsigned char *digest, *digest_d, *find_d, *find;
digest = cypher;
dim3 blocksPerGrid((10000+15)/16, (10000+15)/16);
dim3 threadsPerBlock(16, 16);
// digest
hipMalloc((void**) &digest_d, sizeof(unsigned char)*20);
hipMemcpy(digest_d, digest, sizeof(unsigned char)*20, hipMemcpyHostToDevice);
// find output
hipMalloc((void**) &find_d, sizeof(unsigned char)*N);
find = (unsigned char*) malloc(sizeof(unsigned char)*N);
// bingo
bool *bingo, *bingo_d;
bingo = (bool *) malloc(sizeof(bool));
*bingo = false;
hipMalloc((void**) &bingo_d, sizeof(bool));
hipMemcpy(bingo_d, bingo, sizeof(bool), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, digest_d, find_d, bingo_d);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
// get the output
hipMemcpy(find, find_d, sizeof(unsigned char)*N, hipMemcpyDeviceToHost);
hipMemcpy(bingo, bingo_d, sizeof(bool), hipMemcpyDeviceToHost);
if (*bingo==true) {
printf("\nbingo!\n");
printf("\nplain:");
for (int i=0; i<N; i++)
printf("%c", find[i]);
} else {
printf("not found!");
}
printf("\ntime:%f\n", elapsedTime);
hipFree(find_d);
hipFree(digest_d);
hipFree(bingo_d);
free(find);
free(bingo);
return 0;
}
| 5f7674eac0c000fb815f152f66b91589a6524349.cu | #include <stdio.h>
#include <stdlib.h>
#include "sha1_.cu"
#include <string.h>
#define N 8
#define SPACE 10000
#define BLOCK_SIZE 16
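// Brute-force kernel: thread (x, y) builds one candidate 8-digit ASCII string
// (the first four digits from x, the last four from y), computes its SHA-1,
// and writes the plaintext back when the digest matches the target.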
__global__ void kernel(unsigned char* digest, unsigned char* find, bool* bingo) {
// keep the context in shared memory
__shared__ unsigned char ctx[16][16][N];
// keep the digest in the shared memory, too
//__shared__ unsigned char target[20];
// the digest we calculate
__shared__ unsigned char result[16][16][20];
    // 0000 - 9999
int high = blockIdx.x * blockDim.x + threadIdx.x; if (high >= 10000) return;
    // 0000 - 9999
int low = blockIdx.y * blockDim.y + threadIdx.y; if (low >= 10000) return;
/*
// only one thread of a block has the responsibility to dump the digest
if (threadIdx.x==0 && threadIdx.y==0) {
for (int i=0; i<20; i++)
target[i] = digest[i];
}
__syncthreads(); // !!
*/
// generate and assign context
ctx[threadIdx.x][threadIdx.y][0] = (unsigned char)(high /1000 + 48);
ctx[threadIdx.x][threadIdx.y][1] = (unsigned char)((high % 1000) / 100 + 48);
ctx[threadIdx.x][threadIdx.y][2] = (unsigned char)((high % 100) / 10 + 48);
ctx[threadIdx.x][threadIdx.y][3] = (unsigned char)(high % 10 + 48);
ctx[threadIdx.x][threadIdx.y][4] = (unsigned char)(low/1000 + 48);
ctx[threadIdx.x][threadIdx.y][5] = (unsigned char)((low % 1000) / 100 + 48);
ctx[threadIdx.x][threadIdx.y][6] = (unsigned char)((low % 100) / 10 + 48);
ctx[threadIdx.x][threadIdx.y][7] = (unsigned char)((low % 10 + 48));
// sha1
sha1(result[threadIdx.x][threadIdx.y], ctx[threadIdx.x][threadIdx.y], N);
// compare the result to the digest
int flag = 1;
for (int i=0; i<20; i++) {
if (result[threadIdx.x][threadIdx.y][i] != digest[i]) {
flag = 0;
break;
}
}
//find !!
if (flag==1) {
find[0] = ctx[threadIdx.x][threadIdx.y][0];
find[1] = ctx[threadIdx.x][threadIdx.y][1];
find[2] = ctx[threadIdx.x][threadIdx.y][2];
find[3] = ctx[threadIdx.x][threadIdx.y][3];
find[4] = ctx[threadIdx.x][threadIdx.y][4];
find[5] = ctx[threadIdx.x][threadIdx.y][5];
find[6] = ctx[threadIdx.x][threadIdx.y][6];
find[7] = ctx[threadIdx.x][threadIdx.y][7];
*bingo = true;
}
}
int main(int argc, char** argv) {
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
    if (argc < 2) {
printf("wrong arguments\n");
return -1;
}
// readin
char* input = argv[1];
unsigned char cypher[20];
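    // Convert the 40-character hex digest in argv[1] into 20 raw bytes.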
for (int i=0; i<20;i++) {
unsigned char high = input[2*i];
unsigned char low = input[2*i+1];
unsigned char combine;
switch(high) {
case '1': combine = 0x10; break;
case '2': combine = 0x20; break;
case '3': combine = 0x30; break;
case '4': combine = 0x40; break;
case '5': combine = 0x50; break;
case '6': combine = 0x60; break;
case '7': combine = 0x70; break;
case '8': combine = 0x80; break;
case '9': combine = 0x90; break;
case 'a': combine = 0xa0; break;
case 'b': combine = 0xb0; break;
case 'c': combine = 0xc0; break;
case 'd': combine = 0xd0; break;
case 'e': combine = 0xe0; break;
case 'f': combine = 0xf0; break;
default: combine = 0x00;
}
switch(low) {
case '1': combine |= 0x01; break;
case '2': combine |= 0x02; break;
case '3': combine |= 0x03; break;
case '4': combine |= 0x04; break;
case '5': combine |= 0x05; break;
case '6': combine |= 0x06; break;
case '7': combine |= 0x07; break;
case '8': combine |= 0x08; break;
case '9': combine |= 0x09; break;
case 'a': combine |= 0x0a; break;
case 'b': combine |= 0x0b; break;
case 'c': combine |= 0x0c; break;
case 'd': combine |= 0x0d; break;
case 'e': combine |= 0x0e; break;
case 'f': combine |= 0x0f; break;
default: combine |= 0x00;
}
cypher[i] = combine;
}
printf("\ncypher:");
for (int i=0; i<20; i++) {
printf("%x", cypher[i]);
}
printf("\n");
// cypher has been prepared
unsigned char *digest, *digest_d, *find_d, *find;
digest = cypher;
dim3 blocksPerGrid((10000+15)/16, (10000+15)/16);
dim3 threadsPerBlock(16, 16);
// digest
cudaMalloc((void**) &digest_d, sizeof(unsigned char)*20);
cudaMemcpy(digest_d, digest, sizeof(unsigned char)*20, cudaMemcpyHostToDevice);
// find output
cudaMalloc((void**) &find_d, sizeof(unsigned char)*N);
find = (unsigned char*) malloc(sizeof(unsigned char)*N);
// bingo
bool *bingo, *bingo_d;
bingo = (bool *) malloc(sizeof(bool));
*bingo = false;
cudaMalloc((void**) &bingo_d, sizeof(bool));
cudaMemcpy(bingo_d, bingo, sizeof(bool), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
kernel<<<blocksPerGrid, threadsPerBlock>>>(digest_d, find_d, bingo_d);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// get the output
cudaMemcpy(find, find_d, sizeof(unsigned char)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(bingo, bingo_d, sizeof(bool), cudaMemcpyDeviceToHost);
if (*bingo==true) {
printf("\nbingo!\n");
printf("\nplain:");
for (int i=0; i<N; i++)
printf("%c", find[i]);
} else {
printf("not found!");
}
printf("\ntime:%f\n", elapsedTime);
cudaFree(find_d);
cudaFree(digest_d);
cudaFree(bingo_d);
free(find);
free(bingo);
return 0;
}
|
fab8b92b8e89a2a555018ef996714304cda1dd0f.hip | // !!! This is a file automatically generated by hipify!!!
//raytracer.mustafaisik.net//
#include "texture_manager.cuh"
#include "cuda_utils.cuh"
#include <FreeImage/FreeImage.h>
TextureManager& TextureManager::Manager()
{
static TextureManager manager;
return manager;
}
void TextureManager::loadPerlinTexture(const Texture::SampleParams& sample_params)
{
m_textures.push_back(Texture(sample_params, Texture::PERLIN, 0));
}
//Checks if the "filepath" image exists or not.
//If it is loaded before, it uses the existing data for it.
//If it is not loaded before, it creates the new data.
void TextureManager::loadImageTexture(const std::string& filepath, const Texture::SampleParams& sample_params)
{
auto pair = m_namearray_pair.find(filepath.c_str());
hipArray* cuda_array = nullptr;
//If it is loaded before.
if (pair != m_namearray_pair.end())
{
cuda_array = pair->second;
}
//If it is not loaded before.
else
{
FREE_IMAGE_FORMAT fif = FIF_UNKNOWN;
FIBITMAP* dib_raw = nullptr;
BYTE* bits = nullptr;
fif = FreeImage_GetFileType(filepath.c_str(), 0);
if (fif == FIF_UNKNOWN)
{
fif = FreeImage_GetFIFFromFilename(filepath.c_str());
}
if (fif == FIF_UNKNOWN)
{
throw std::runtime_error("Error: Unknown image file format");
}
if (FreeImage_FIFSupportsReading(fif))
{
dib_raw = FreeImage_Load(fif, filepath.c_str());
}
if (!dib_raw)
{
throw std::runtime_error("Error: Failed to load the image file");
}
auto dib = FreeImage_ConvertTo32Bits(dib_raw);
FreeImage_FlipVertical(dib);
bits = FreeImage_GetBits(dib);
auto image_width = FreeImage_GetWidth(dib);
auto image_height = FreeImage_GetHeight(dib);
//Create the texture on device.
hipChannelFormatDesc channel_desc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned); //RGBA
HANDLE_ERROR(hipMallocArray(&cuda_array, &channel_desc, image_width, image_height));
HANDLE_ERROR(hipMemcpyToArray(cuda_array, 0, 0, bits, image_width * image_height * 4, hipMemcpyHostToDevice));
m_namearray_pair.insert(std::make_pair(filepath.c_str(), cuda_array));
FreeImage_Unload(dib_raw);
FreeImage_Unload(dib);
}
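    //A texture object is created for every Texture, even when the underlying image array is cached and shared, so each texture keeps its own sampling parameters.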
hipResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = hipResourceTypeArray;
res_desc.res.array.array = cuda_array;
hipTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = hipTextureAddressMode(sample_params.address_mode);
tex_desc.addressMode[1] = hipTextureAddressMode(sample_params.address_mode);
tex_desc.filterMode = hipTextureFilterMode(sample_params.filter_mode);
tex_desc.readMode = hipReadModeNormalizedFloat;
tex_desc.normalizedCoords = 1;
hipTextureObject_t texture = 0;
HANDLE_ERROR(hipCreateTextureObject(&texture, &res_desc, &tex_desc, nullptr));
m_textures.push_back(Texture(sample_params, Texture::IMAGE, texture));
}
TextureManager::~TextureManager()
{
for (auto& texture : m_textures)
{
if (texture.get_texture_type() == Texture::IMAGE)
{
hipDestroyTextureObject(texture.get_texture());
}
}
for (auto& pair : m_namearray_pair)
{
hipFreeArray(pair.second);
}
} | fab8b92b8e89a2a555018ef996714304cda1dd0f.cu | //raytracer.mustafaisik.net//
#include "texture_manager.cuh"
#include "cuda_utils.cuh"
#include <FreeImage/FreeImage.h>
TextureManager& TextureManager::Manager()
{
static TextureManager manager;
return manager;
}
void TextureManager::loadPerlinTexture(const Texture::SampleParams& sample_params)
{
m_textures.push_back(Texture(sample_params, Texture::PERLIN, 0));
}
//Checks if the "filepath" image exists or not.
//If it is loaded before, it uses the existing data for it.
//If it is not loaded before, it creates the new data.
void TextureManager::loadImageTexture(const std::string& filepath, const Texture::SampleParams& sample_params)
{
auto pair = m_namearray_pair.find(filepath.c_str());
cudaArray* cuda_array = nullptr;
//If it is loaded before.
if (pair != m_namearray_pair.end())
{
cuda_array = pair->second;
}
//If it is not loaded before.
else
{
FREE_IMAGE_FORMAT fif = FIF_UNKNOWN;
FIBITMAP* dib_raw = nullptr;
BYTE* bits = nullptr;
fif = FreeImage_GetFileType(filepath.c_str(), 0);
if (fif == FIF_UNKNOWN)
{
fif = FreeImage_GetFIFFromFilename(filepath.c_str());
}
if (fif == FIF_UNKNOWN)
{
throw std::runtime_error("Error: Unknown image file format");
}
if (FreeImage_FIFSupportsReading(fif))
{
dib_raw = FreeImage_Load(fif, filepath.c_str());
}
if (!dib_raw)
{
throw std::runtime_error("Error: Failed to load the image file");
}
auto dib = FreeImage_ConvertTo32Bits(dib_raw);
FreeImage_FlipVertical(dib);
bits = FreeImage_GetBits(dib);
auto image_width = FreeImage_GetWidth(dib);
auto image_height = FreeImage_GetHeight(dib);
//Create the texture on device.
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); //RGBA
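//Note: FreeImage_ConvertTo32Bits stores pixels in BGRA byte order on little-endian hosts,
//so the sampled components arrive as B,G,R,A unless they are swizzled later.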
HANDLE_ERROR(cudaMallocArray(&cuda_array, &channel_desc, image_width, image_height));
HANDLE_ERROR(cudaMemcpyToArray(cuda_array, 0, 0, bits, image_width * image_height * 4, cudaMemcpyHostToDevice));
m_namearray_pair.insert(std::make_pair(filepath.c_str(), cuda_array));
FreeImage_Unload(dib_raw);
FreeImage_Unload(dib);
}
cudaResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = cudaResourceTypeArray;
res_desc.res.array.array = cuda_array;
cudaTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = cudaTextureAddressMode(sample_params.address_mode);
tex_desc.addressMode[1] = cudaTextureAddressMode(sample_params.address_mode);
tex_desc.filterMode = cudaTextureFilterMode(sample_params.filter_mode);
tex_desc.readMode = cudaReadModeNormalizedFloat;
tex_desc.normalizedCoords = 1;
cudaTextureObject_t texture = 0;
HANDLE_ERROR(cudaCreateTextureObject(&texture, &res_desc, &tex_desc, nullptr));
m_textures.push_back(Texture(sample_params, Texture::IMAGE, texture));
}
TextureManager::~TextureManager()
{
for (auto& texture : m_textures)
{
if (texture.get_texture_type() == Texture::IMAGE)
{
cudaDestroyTextureObject(texture.get_texture());
}
}
for (auto& pair : m_namearray_pair)
{
cudaFreeArray(pair.second);
}
} |
58e12c3ee89b8a8db4bd7d001c43eb89278c639f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_shell_private.h"
extern "C" {
#include "cuda_shell.h"
}
PetscErrorCode BuildMat_CUDAShell(PetscInt L,PetscInt nterms,PetscInt* masks,PetscInt* signs,PetscScalar* coeffs,Mat *A)
{
PetscErrorCode ierr;
PetscInt N,n;
shell_context *ctx;
N = 1<<L;
n = PETSC_DECIDE;
PetscSplitOwnership(PETSC_COMM_WORLD,&n,&N);
ierr = BuildContext_CUDA(L,nterms,masks,signs,coeffs,&ctx);CHKERRQ(ierr);
ierr = MatCreateShell(PETSC_COMM_WORLD,n,n,N,N,ctx,A);CHKERRQ(ierr);
ierr = MatShellSetOperation(*A,MATOP_MULT,(void(*)(void))MatMult_CUDAShell);
ierr = MatShellSetOperation(*A,MATOP_NORM,(void(*)(void))MatNorm_CUDAShell);
ierr = MatShellSetOperation(*A,MATOP_GET_VECS,(void(*)(void))MatCreateVecs_CUDAShell);
return ierr;
}
PetscErrorCode BuildContext_CUDA(PetscInt L,PetscInt nterms,PetscInt* masks,PetscInt* signs,PetscScalar* coeffs,shell_context **ctx_p)
{
PetscErrorCode ierr;
hipError_t err;
shell_context *ctx;
ierr = PetscMalloc(sizeof(shell_context),ctx_p);CHKERRQ(ierr);
ctx = (*ctx_p);
ctx->L = L;
ctx->nterms = nterms;
ctx->nrm = -1;
ctx->gpu = PETSC_TRUE;
err = hipMalloc((void **) &(ctx->masks), sizeof(PetscInt)*nterms);CHKERRCUDA(err);
err = hipMalloc((void **) &(ctx->signs), sizeof(PetscInt)*nterms);CHKERRCUDA(err);
err = hipMalloc((void **) &(ctx->coeffs), sizeof(PetscScalar)*nterms);CHKERRCUDA(err);
err = hipMemcpy(ctx->masks,masks,sizeof(PetscInt)*nterms,hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(ctx->signs,signs,sizeof(PetscInt)*nterms,hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(ctx->coeffs,coeffs,sizeof(PetscScalar)*nterms,hipMemcpyHostToDevice);CHKERRCUDA(err);
return ierr;
}
PetscErrorCode DestroyContext_CUDA(Mat A)
{
PetscErrorCode ierr;
hipError_t err;
shell_context *ctx;
ierr = MatShellGetContext(A,&ctx);CHKERRQ(ierr);
err = hipFree(ctx->masks);CHKERRCUDA(err);
err = hipFree(ctx->signs);CHKERRCUDA(err);
err = hipFree(ctx->coeffs);CHKERRCUDA(err);
ierr = PetscFree(ctx);CHKERRQ(ierr);
return ierr;
}
PetscErrorCode MatMult_CUDAShell(Mat M,Vec x,Vec b)
{
PetscErrorCode ierr;
hipError_t err;
shell_context *ctx;
const PetscScalar* xarray;
PetscScalar* barray;
PetscInt size;
ierr = VecSet(b,0);CHKERRQ(ierr);
ierr = MatShellGetContext(M,&ctx);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(b,&barray);CHKERRQ(ierr);
size = 1 << ctx->L;
err = hipDeviceSynchronize();CHKERRCUDA(err);
hipLaunchKernelGGL(( device_MatMult_Shell), dim3(GPU_BLOCK_NUM),dim3(GPU_BLOCK_SIZE), 0, 0, size,
ctx->masks,
ctx->signs,
ctx->coeffs,
ctx->nterms,
xarray,
barray);
err = hipDeviceSynchronize();CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(b,&barray);CHKERRQ(ierr);
return ierr;
}
__global__ void device_MatMult_Shell(PetscInt size,
PetscInt* masks,
PetscInt* signs,
PetscScalar* coeffs,
PetscInt nterms,
const PetscScalar* xarray,
PetscScalar* barray)
{
/* the following four lines come from the PETSc cuda source */
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
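/* Example: with gridDim.x blocks, each block owns a contiguous chunk of
   ~ceil(size / gridDim.x) kets, and its threads sweep that chunk with a stride of blockDim.x. */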
PetscScalar tmp,val;
PetscInt state,ket,mask,next_mask,this_start,i;
this_start = vec_start_index + threadIdx.x;
/* only access mask from global memory once */
/* on the gpu, unlike on parallel CPUs, we have access
* to the whole vector from any processor. That's awesome
* because it means that we can accumulate results by row
* instead of by column, and only do a single memory write
* per entry in the output vector. Then we don't have to worry
* about atomic operations either!
*/
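/* Terms that share the same mask are expected to be stored adjacently: each pass of the
   inner do-while below folds one run of equal masks into a single contribution
   tmp * xarray[ket ^ mask] to this row's output value. */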
for (ket=this_start; ket<vec_stop_index; ket += blockDim.x) {
val = 0;
mask = masks[0];
for (i=0;i<nterms;) {
tmp = 0;
state = ket ^ mask;
/* sum all terms for this matrix element */
do {
#if defined(PETSC_USE_64BIT_INDICES)
tmp += __popcll(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#else
tmp += __popc(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#endif
++i;
if (i == nterms) break;
next_mask = masks[i];
} while (mask == next_mask);
/* this can be optimized by keeping track of # of terms per matrix element.
I think that should actually make it a lot faster because it gets rid of
a significant chunk of the memory reads */
val += tmp * xarray[state];
mask = next_mask;
}
barray[ket] = val;
}
}
PetscErrorCode MatNorm_CUDAShell(Mat A,NormType type,PetscReal *nrm)
{
PetscErrorCode ierr;
hipError_t err;
shell_context *ctx;
PetscReal *d_maxs,*h_maxs;
PetscInt i,N;
if (type != NORM_INFINITY) {
SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_ARG_OUTOFRANGE,"Only NORM_INFINITY is implemented for shell matrices.");
}
ierr = MatShellGetContext(A,&ctx);CHKERRQ(ierr);
/*
keep the norm cached so we don't have to compute it all the time.
if we already have it, just return it
*/
if (ctx->nrm != -1) {
(*nrm) = ctx->nrm;
return ierr;
}
err = hipMalloc((void **) &d_maxs,sizeof(PetscReal)*GPU_BLOCK_NUM);CHKERRCUDA(err);
ierr = PetscMalloc(sizeof(PetscReal)*GPU_BLOCK_NUM,&h_maxs);CHKERRQ(ierr);
N = 1<<ctx->L;
hipLaunchKernelGGL(( device_MatNorm_Shell), dim3(GPU_BLOCK_NUM),dim3(GPU_BLOCK_SIZE),sizeof(PetscReal)*GPU_BLOCK_SIZE, 0, N,ctx->masks,ctx->signs,ctx->coeffs,ctx->nterms,d_maxs);
err = hipDeviceSynchronize();CHKERRCUDA(err);
err = hipMemcpy(h_maxs,d_maxs,sizeof(PetscReal)*GPU_BLOCK_NUM,hipMemcpyDeviceToHost);CHKERRCUDA(err);
/* now do max of h_maxs */
(*nrm) = 0;
for (i=0;i<GPU_BLOCK_NUM;++i) {
if (h_maxs[i] > (*nrm)) (*nrm) = h_maxs[i];
}
ctx->nrm = (*nrm);
err = hipFree(d_maxs);CHKERRCUDA(err);
ierr = PetscFree(h_maxs);CHKERRQ(ierr);
return ierr;
}
__global__ void device_MatNorm_Shell(PetscInt size,
PetscInt* masks,
PetscInt* signs,
PetscScalar* coeffs,
PetscInt nterms,
PetscReal *d_maxs)
{
extern __shared__ PetscReal threadmax[];
/* the following four lines come from the PETSc cuda source */
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscReal sum,v1,v2;
PetscScalar csum;
PetscInt state, i, mask, next_mask;
/* first find this thread's max and put it in threadmax */
threadmax[threadIdx.x] = 0;
for (state=vec_start_index+threadIdx.x;state<vec_stop_index;state += blockDim.x) {
sum = 0;
for (i=0;i<nterms;) {
csum = 0;
mask = masks[i];
/* sum all terms for this matrix element */
do {
#if defined(PETSC_USE_64BIT_INDICES)
csum += __popcll(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#else
csum += __popc(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#endif
++i;
if (i >= nterms) break;
next_mask = masks[i];
} while (mask == next_mask);
sum += abs(csum);
}
if (sum > threadmax[threadIdx.x]) {
threadmax[threadIdx.x] = sum;
}
}
__syncthreads();
/* now do the coolest reduce ever on the shared memory and hand it off to CPU */
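/* Tree reduction, e.g. with blockDim.x = 8: i=1 combines (0,1),(2,3),(4,5),(6,7);
   i=2 combines (0,2),(4,6); i=4 combines (0,4); threadmax[0] then holds the block maximum.
   Note this assumes blockDim.x (GPU_BLOCK_SIZE) is a power of two. */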
for (i=1; i<blockDim.x; i*=2) {
if (threadIdx.x % (2*i) == 0) {
v1 = threadmax[threadIdx.x];
v2 = threadmax[threadIdx.x + i];
threadmax[threadIdx.x] = v1>v2 ? v1 : v2;
}
__syncthreads();
}
if (threadIdx.x == 0) d_maxs[blockIdx.x] = threadmax[0];
}
PetscErrorCode MatCreateVecs_CUDAShell(Mat mat, Vec *right, Vec *left)
{
PetscErrorCode ierr;
PetscInt N;
ierr = MatGetSize(mat,&N,NULL);CHKERRQ(ierr);
if (right) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
ierr = VecSetSizes(*right,PETSC_DECIDE,N);CHKERRQ(ierr);
ierr = VecSetFromOptions(*right);
}
if (left) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
ierr = VecSetSizes(*left,PETSC_DECIDE,N);CHKERRQ(ierr);
ierr = VecSetFromOptions(*left);
}
return 0;
}
| 58e12c3ee89b8a8db4bd7d001c43eb89278c639f.cu |
#include "cuda_shell_private.h"
extern "C" {
#include "cuda_shell.h"
}
PetscErrorCode BuildMat_CUDAShell(PetscInt L,PetscInt nterms,PetscInt* masks,PetscInt* signs,PetscScalar* coeffs,Mat *A)
{
PetscErrorCode ierr;
PetscInt N,n;
shell_context *ctx;
N = 1<<L;
n = PETSC_DECIDE;
PetscSplitOwnership(PETSC_COMM_WORLD,&n,&N);
ierr = BuildContext_CUDA(L,nterms,masks,signs,coeffs,&ctx);CHKERRQ(ierr);
ierr = MatCreateShell(PETSC_COMM_WORLD,n,n,N,N,ctx,A);CHKERRQ(ierr);
ierr = MatShellSetOperation(*A,MATOP_MULT,(void(*)(void))MatMult_CUDAShell);
ierr = MatShellSetOperation(*A,MATOP_NORM,(void(*)(void))MatNorm_CUDAShell);
ierr = MatShellSetOperation(*A,MATOP_GET_VECS,(void(*)(void))MatCreateVecs_CUDAShell);
return ierr;
}
PetscErrorCode BuildContext_CUDA(PetscInt L,PetscInt nterms,PetscInt* masks,PetscInt* signs,PetscScalar* coeffs,shell_context **ctx_p)
{
PetscErrorCode ierr;
cudaError_t err;
shell_context *ctx;
ierr = PetscMalloc(sizeof(shell_context),ctx_p);CHKERRQ(ierr);
ctx = (*ctx_p);
ctx->L = L;
ctx->nterms = nterms;
ctx->nrm = -1;
ctx->gpu = PETSC_TRUE;
err = cudaMalloc((void **) &(ctx->masks), sizeof(PetscInt)*nterms);CHKERRCUDA(err);
err = cudaMalloc((void **) &(ctx->signs), sizeof(PetscInt)*nterms);CHKERRCUDA(err);
err = cudaMalloc((void **) &(ctx->coeffs), sizeof(PetscScalar)*nterms);CHKERRCUDA(err);
err = cudaMemcpy(ctx->masks,masks,sizeof(PetscInt)*nterms,cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(ctx->signs,signs,sizeof(PetscInt)*nterms,cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(ctx->coeffs,coeffs,sizeof(PetscScalar)*nterms,cudaMemcpyHostToDevice);CHKERRCUDA(err);
return ierr;
}
PetscErrorCode DestroyContext_CUDA(Mat A)
{
PetscErrorCode ierr;
cudaError_t err;
shell_context *ctx;
ierr = MatShellGetContext(A,&ctx);CHKERRQ(ierr);
err = cudaFree(ctx->masks);CHKERRCUDA(err);
err = cudaFree(ctx->signs);CHKERRCUDA(err);
err = cudaFree(ctx->coeffs);CHKERRCUDA(err);
ierr = PetscFree(ctx);CHKERRQ(ierr);
return ierr;
}
PetscErrorCode MatMult_CUDAShell(Mat M,Vec x,Vec b)
{
PetscErrorCode ierr;
cudaError_t err;
shell_context *ctx;
const PetscScalar* xarray;
PetscScalar* barray;
PetscInt size;
ierr = VecSet(b,0);CHKERRQ(ierr);
ierr = MatShellGetContext(M,&ctx);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(b,&barray);CHKERRQ(ierr);
size = 1 << ctx->L;
err = cudaThreadSynchronize();CHKERRCUDA(err);
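/* Note: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the modern
   equivalent (the hipified version of this file already uses hipDeviceSynchronize). */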
device_MatMult_Shell<<<GPU_BLOCK_NUM,GPU_BLOCK_SIZE>>>(size,
ctx->masks,
ctx->signs,
ctx->coeffs,
ctx->nterms,
xarray,
barray);
err = cudaThreadSynchronize();CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(b,&barray);CHKERRQ(ierr);
return ierr;
}
__global__ void device_MatMult_Shell(PetscInt size,
PetscInt* masks,
PetscInt* signs,
PetscScalar* coeffs,
PetscInt nterms,
const PetscScalar* xarray,
PetscScalar* barray)
{
/* the following four lines come from the PETSc cuda source */
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar tmp,val;
PetscInt state,ket,mask,next_mask,this_start,i;
this_start = vec_start_index + threadIdx.x;
/* only access mask from global memory once */
/* on the gpu, unlike on parallel CPUs, we have access
* to the whole vector from any processor. That's awesome
* because it means that we can accumulate results by row
* instead of by column, and only do a single memory write
* per entry in the output vector. Then we don't have to worry
* about atomic operations either!
*/
for (ket=this_start; ket<vec_stop_index; ket += blockDim.x) {
val = 0;
mask = masks[0];
for (i=0;i<nterms;) {
tmp = 0;
state = ket ^ mask;
/* sum all terms for this matrix element */
do {
#if defined(PETSC_USE_64BIT_INDICES)
tmp += __popcll(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#else
tmp += __popc(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#endif
++i;
if (i == nterms) break;
next_mask = masks[i];
} while (mask == next_mask);
/* this can be optimized by keeping track of # of terms per matrix element.
I think that should actually make it a lot faster because it gets rid of
a significant chunk of the memory reads */
val += tmp * xarray[state];
mask = next_mask;
}
barray[ket] = val;
}
}
PetscErrorCode MatNorm_CUDAShell(Mat A,NormType type,PetscReal *nrm)
{
PetscErrorCode ierr;
cudaError_t err;
shell_context *ctx;
PetscReal *d_maxs,*h_maxs;
PetscInt i,N;
if (type != NORM_INFINITY) {
SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_ARG_OUTOFRANGE,"Only NORM_INFINITY is implemented for shell matrices.");
}
ierr = MatShellGetContext(A,&ctx);CHKERRQ(ierr);
/*
keep the norm cached so we don't have to compute it all the time.
if we already have it, just return it
*/
if (ctx->nrm != -1) {
(*nrm) = ctx->nrm;
return ierr;
}
err = cudaMalloc((void **) &d_maxs,sizeof(PetscReal)*GPU_BLOCK_NUM);CHKERRCUDA(err);
ierr = PetscMalloc(sizeof(PetscReal)*GPU_BLOCK_NUM,&h_maxs);CHKERRQ(ierr);
N = 1<<ctx->L;
device_MatNorm_Shell<<<GPU_BLOCK_NUM,GPU_BLOCK_SIZE,sizeof(PetscReal)*GPU_BLOCK_SIZE>>>(N,ctx->masks,ctx->signs,ctx->coeffs,ctx->nterms,d_maxs);
err = cudaThreadSynchronize();CHKERRCUDA(err);
err = cudaMemcpy(h_maxs,d_maxs,sizeof(PetscReal)*GPU_BLOCK_NUM,cudaMemcpyDeviceToHost);CHKERRCUDA(err);
/* now do max of h_maxs */
(*nrm) = 0;
for (i=0;i<GPU_BLOCK_NUM;++i) {
if (h_maxs[i] > (*nrm)) (*nrm) = h_maxs[i];
}
ctx->nrm = (*nrm);
err = cudaFree(d_maxs);CHKERRCUDA(err);
ierr = PetscFree(h_maxs);CHKERRQ(ierr);
return ierr;
}
__global__ void device_MatNorm_Shell(PetscInt size,
PetscInt* masks,
PetscInt* signs,
PetscScalar* coeffs,
PetscInt nterms,
PetscReal *d_maxs)
{
extern __shared__ PetscReal threadmax[];
/* the following four lines come from the PETSc cuda source */
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscReal sum,v1,v2;
PetscScalar csum;
PetscInt state, i, mask, next_mask;
/* first find this thread's max and put it in threadmax */
threadmax[threadIdx.x] = 0;
for (state=vec_start_index+threadIdx.x;state<vec_stop_index;state += blockDim.x) {
sum = 0;
for (i=0;i<nterms;) {
csum = 0;
mask = masks[i];
/* sum all terms for this matrix element */
do {
#if defined(PETSC_USE_64BIT_INDICES)
csum += __popcll(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#else
csum += __popc(state & signs[i])%2 ? -coeffs[i] : coeffs[i];
#endif
++i;
if (i >= nterms) break;
next_mask = masks[i];
} while (mask == next_mask);
sum += abs(csum);
}
if (sum > threadmax[threadIdx.x]) {
threadmax[threadIdx.x] = sum;
}
}
__syncthreads();
/* now do the coolest reduce ever on the shared memory and hand it off to CPU */
for (i=1; i<blockDim.x; i*=2) {
if (threadIdx.x % (2*i) == 0) {
v1 = threadmax[threadIdx.x];
v2 = threadmax[threadIdx.x + i];
threadmax[threadIdx.x] = v1>v2 ? v1 : v2;
}
__syncthreads();
}
if (threadIdx.x == 0) d_maxs[blockIdx.x] = threadmax[0];
}
PetscErrorCode MatCreateVecs_CUDAShell(Mat mat, Vec *right, Vec *left)
{
PetscErrorCode ierr;
PetscInt N;
ierr = MatGetSize(mat,&N,NULL);CHKERRQ(ierr);
if (right) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
ierr = VecSetSizes(*right,PETSC_DECIDE,N);CHKERRQ(ierr);
ierr = VecSetFromOptions(*right);
}
if (left) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
ierr = VecSetSizes(*left,PETSC_DECIDE,N);CHKERRQ(ierr);
ierr = VecSetFromOptions(*left);
}
return 0;
}
|
07bf51147a9c95bece46f2ed8ab1cb6ffbb8fe6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include "iou_box3d/iou_utils.cuh"
#include "utils/pytorch3d_cutils.h"
// Parallelize over N*M computations which can each be done
// independently
__global__ void IoUBox3DKernel(
const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes1,
const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes2,
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> vols,
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> ious) {
const size_t N = boxes1.size(0);
const size_t M = boxes2.size(0);
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
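// Grid-stride loop over all N*M (box1, box2) pairs: thread `tid` handles
// i = tid, tid + stride, tid + 2*stride, ... so any launch size covers the full set.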
for (size_t i = tid; i < N * M; i += stride) {
const size_t n = i / M; // box1 index
const size_t m = i % M; // box2 index
// Convert to array of structs of face vertices i.e. effectively (F, 3, 3)
// FaceVerts is a data type defined in iou_utils.cuh
FaceVerts box1_tris[NUM_TRIS];
FaceVerts box2_tris[NUM_TRIS];
GetBoxTris(boxes1[n], box1_tris);
GetBoxTris(boxes2[m], box2_tris);
// Calculate the position of the center of the box which is used in
// several calculations. This requires a tensor as input.
const float3 box1_center = BoxCenter(boxes1[n]);
const float3 box2_center = BoxCenter(boxes2[m]);
// Convert to an array of face vertices
FaceVerts box1_planes[NUM_PLANES];
GetBoxPlanes(boxes1[n], box1_planes);
FaceVerts box2_planes[NUM_PLANES];
GetBoxPlanes(boxes2[m], box2_planes);
// Get Box Volumes
const float box1_vol = BoxVolume(box1_tris, box1_center, NUM_TRIS);
const float box2_vol = BoxVolume(box2_tris, box2_center, NUM_TRIS);
// Tris in Box1 intersection with Planes in Box2
// Initialize box1 intersecting faces. MAX_TRIS is the
// max faces possible in the intersecting shape.
// TODO: determine if the value of MAX_TRIS is sufficient or
// if we should store the max tris for each NxM computation
// and throw an error if any exceeds the max.
FaceVerts box1_intersect[MAX_TRIS];
for (int j = 0; j < NUM_TRIS; ++j) {
// Initialize the faces from the box
box1_intersect[j] = box1_tris[j];
}
// Get the count of the actual number of faces in the intersecting shape
int box1_count = BoxIntersections(box2_planes, box2_center, box1_intersect);
// Tris in Box2 intersection with Planes in Box1
FaceVerts box2_intersect[MAX_TRIS];
for (int j = 0; j < NUM_TRIS; ++j) {
box2_intersect[j] = box2_tris[j];
}
const int box2_count =
BoxIntersections(box1_planes, box1_center, box2_intersect);
// If there are overlapping regions in Box2, remove any coplanar faces
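// (a clipped face of box1 that is coplanar with a clipped face of box2 describes the same
// boundary facet; keeping both would presumably double-count it when the triangle sets are
// merged below, so only one copy is retained)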
if (box2_count > 0) {
// Identify if any triangles in Box2 are coplanar with Box1
Keep tri2_keep[MAX_TRIS];
for (int j = 0; j < MAX_TRIS; ++j) {
// Initialize the valid faces to be true
tri2_keep[j].keep = j < box2_count ? true : false;
}
for (int b1 = 0; b1 < box1_count; ++b1) {
for (int b2 = 0; b2 < box2_count; ++b2) {
const bool is_coplanar =
IsCoplanarFace(box1_intersect[b1], box2_intersect[b2]);
if (is_coplanar) {
tri2_keep[b2].keep = false;
}
}
}
// Keep only the non coplanar triangles in Box2 - add them to the
// Box1 triangles.
for (int b2 = 0; b2 < box2_count; ++b2) {
if (tri2_keep[b2].keep) {
box1_intersect[box1_count] = box2_intersect[b2];
// box1_count will determine the total faces in the
// intersecting shape
box1_count++;
}
}
}
// Initialize the vol and iou to 0.0 in case there are no triangles
// in the intersecting shape.
float vol = 0.0;
float iou = 0.0;
// If there are triangles in the intersecting shape
if (box1_count > 0) {
// The intersecting shape is a polyhedron made up of the
// triangular faces that are all now in box1_intersect.
// Calculate the polyhedron center
const float3 poly_center = PolyhedronCenter(box1_intersect, box1_count);
// Compute intersecting polyhedron volume
vol = BoxVolume(box1_intersect, poly_center, box1_count);
// Compute IoU
iou = vol / (box1_vol + box2_vol - vol);
}
// Write the volume and IoU to global memory
vols[n][m] = vol;
ious[n][m] = iou;
}
}
std::tuple<at::Tensor, at::Tensor> IoUBox3DCuda(
const at::Tensor& boxes1, // (N, 8, 3)
const at::Tensor& boxes2) { // (M, 8, 3)
// Check inputs are on the same device
at::TensorArg boxes1_t{boxes1, "boxes1", 1}, boxes2_t{boxes2, "boxes2", 2};
at::CheckedFrom c = "IoUBox3DCuda";
at::checkAllSameGPU(c, {boxes1_t, boxes2_t});
at::checkAllSameType(c, {boxes1_t, boxes2_t});
// Set the device for the kernel launch based on the device of boxes1
at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes1.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(boxes2.size(2) == boxes1.size(2), "Boxes must have shape (8, 3)");
TORCH_CHECK(
(boxes2.size(1) == 8) && (boxes1.size(1) == 8),
"Boxes must have shape (8, 3)");
const int64_t N = boxes1.size(0);
const int64_t M = boxes2.size(0);
auto vols = at::zeros({N, M}, boxes1.options());
auto ious = at::zeros({N, M}, boxes1.options());
if (vols.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(vols, ious);
}
const size_t blocks = 512;
const size_t threads = 256;
hipLaunchKernelGGL(( IoUBox3DKernel), dim3(blocks), dim3(threads), 0, stream,
boxes1.packed_accessor64<float, 3, at::RestrictPtrTraits>(),
boxes2.packed_accessor64<float, 3, at::RestrictPtrTraits>(),
vols.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
ious.packed_accessor64<float, 2, at::RestrictPtrTraits>());
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(vols, ious);
}
| 07bf51147a9c95bece46f2ed8ab1cb6ffbb8fe6e.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include "iou_box3d/iou_utils.cuh"
#include "utils/pytorch3d_cutils.h"
// Parallelize over N*M computations which can each be done
// independently
__global__ void IoUBox3DKernel(
const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes1,
const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes2,
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> vols,
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> ious) {
const size_t N = boxes1.size(0);
const size_t M = boxes2.size(0);
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t i = tid; i < N * M; i += stride) {
const size_t n = i / M; // box1 index
const size_t m = i % M; // box2 index
// Convert to array of structs of face vertices i.e. effectively (F, 3, 3)
// FaceVerts is a data type defined in iou_utils.cuh
FaceVerts box1_tris[NUM_TRIS];
FaceVerts box2_tris[NUM_TRIS];
GetBoxTris(boxes1[n], box1_tris);
GetBoxTris(boxes2[m], box2_tris);
// Calculate the position of the center of the box which is used in
// several calculations. This requires a tensor as input.
const float3 box1_center = BoxCenter(boxes1[n]);
const float3 box2_center = BoxCenter(boxes2[m]);
// Convert to an array of face vertices
FaceVerts box1_planes[NUM_PLANES];
GetBoxPlanes(boxes1[n], box1_planes);
FaceVerts box2_planes[NUM_PLANES];
GetBoxPlanes(boxes2[m], box2_planes);
// Get Box Volumes
const float box1_vol = BoxVolume(box1_tris, box1_center, NUM_TRIS);
const float box2_vol = BoxVolume(box2_tris, box2_center, NUM_TRIS);
// Tris in Box1 intersection with Planes in Box2
// Initialize box1 intersecting faces. MAX_TRIS is the
// max faces possible in the intersecting shape.
// TODO: determine if the value of MAX_TRIS is sufficient or
// if we should store the max tris for each NxM computation
// and throw an error if any exceeds the max.
FaceVerts box1_intersect[MAX_TRIS];
for (int j = 0; j < NUM_TRIS; ++j) {
// Initialize the faces from the box
box1_intersect[j] = box1_tris[j];
}
// Get the count of the actual number of faces in the intersecting shape
int box1_count = BoxIntersections(box2_planes, box2_center, box1_intersect);
// Tris in Box2 intersection with Planes in Box1
FaceVerts box2_intersect[MAX_TRIS];
for (int j = 0; j < NUM_TRIS; ++j) {
box2_intersect[j] = box2_tris[j];
}
const int box2_count =
BoxIntersections(box1_planes, box1_center, box2_intersect);
// If there are overlapping regions in Box2, remove any coplanar faces
if (box2_count > 0) {
// Identify if any triangles in Box2 are coplanar with Box1
Keep tri2_keep[MAX_TRIS];
for (int j = 0; j < MAX_TRIS; ++j) {
// Initialize the valid faces to be true
tri2_keep[j].keep = j < box2_count ? true : false;
}
for (int b1 = 0; b1 < box1_count; ++b1) {
for (int b2 = 0; b2 < box2_count; ++b2) {
const bool is_coplanar =
IsCoplanarFace(box1_intersect[b1], box2_intersect[b2]);
if (is_coplanar) {
tri2_keep[b2].keep = false;
}
}
}
// Keep only the non coplanar triangles in Box2 - add them to the
// Box1 triangles.
for (int b2 = 0; b2 < box2_count; ++b2) {
if (tri2_keep[b2].keep) {
box1_intersect[box1_count] = box2_intersect[b2];
// box1_count will determine the total faces in the
// intersecting shape
box1_count++;
}
}
}
// Initialize the vol and iou to 0.0 in case there are no triangles
// in the intersecting shape.
float vol = 0.0;
float iou = 0.0;
// If there are triangles in the intersecting shape
if (box1_count > 0) {
// The intersecting shape is a polyhedron made up of the
// triangular faces that are all now in box1_intersect.
// Calculate the polyhedron center
const float3 poly_center = PolyhedronCenter(box1_intersect, box1_count);
// Compute intersecting polyhedron volume
vol = BoxVolume(box1_intersect, poly_center, box1_count);
// Compute IoU
iou = vol / (box1_vol + box2_vol - vol);
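// The denominator is the union volume by inclusion-exclusion:
// vol(A union B) = vol(A) + vol(B) - vol(A intersect B).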
}
// Write the volume and IoU to global memory
vols[n][m] = vol;
ious[n][m] = iou;
}
}
std::tuple<at::Tensor, at::Tensor> IoUBox3DCuda(
const at::Tensor& boxes1, // (N, 8, 3)
const at::Tensor& boxes2) { // (M, 8, 3)
// Check inputs are on the same device
at::TensorArg boxes1_t{boxes1, "boxes1", 1}, boxes2_t{boxes2, "boxes2", 2};
at::CheckedFrom c = "IoUBox3DCuda";
at::checkAllSameGPU(c, {boxes1_t, boxes2_t});
at::checkAllSameType(c, {boxes1_t, boxes2_t});
// Set the device for the kernel launch based on the device of boxes1
at::cuda::CUDAGuard device_guard(boxes1.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(boxes2.size(2) == boxes1.size(2), "Boxes must have shape (8, 3)");
TORCH_CHECK(
(boxes2.size(1) == 8) && (boxes1.size(1) == 8),
"Boxes must have shape (8, 3)");
const int64_t N = boxes1.size(0);
const int64_t M = boxes2.size(0);
auto vols = at::zeros({N, M}, boxes1.options());
auto ious = at::zeros({N, M}, boxes1.options());
if (vols.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(vols, ious);
}
const size_t blocks = 512;
const size_t threads = 256;
IoUBox3DKernel<<<blocks, threads, 0, stream>>>(
boxes1.packed_accessor64<float, 3, at::RestrictPtrTraits>(),
boxes2.packed_accessor64<float, 3, at::RestrictPtrTraits>(),
vols.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
ious.packed_accessor64<float, 2, at::RestrictPtrTraits>());
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(vols, ious);
}
|
8bba1810736e9c1a16a2ea0890d0d866721e54d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/utilities/error.hpp>
#include <nvtext/detail/load_hash_file.hpp>
#include <nvtext/subword_tokenize.hpp>
#include <text/subword/detail/wordpiece_tokenizer.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Convert tokens and row2tensor map to final tensor data.
*
* @param[in] token_ids Tokens from tokenizer
* @param[in] offsets Offsets to each string's output row of tokens
 * @param[in] row2tensor Maps each output row to the index of its source string (tensor)
 * @param[in] row2row_within_tensor Maps each output row to its position among that string's rows
* @param[in] max_sequence_length Maximum number of tokens in a row
* @param[in] nrows_tensor_token_ids Total number of output tensor rows
* @param[in] stride Number of tokens in sub-rows
* @param[in] do_truncate True if tokens should not spill into sub-rows in the output
* @param[out] final_tensor Output vector of token-ids
* @param[out] attn_mask Identifies valid token id entries
* @param[out] metadata Additional data per row
*/
__global__ void kernel_compute_tensor_metadata(
// input
uint32_t const* token_ids,
uint32_t const* offsets,
uint32_t const* row2tensor,
uint32_t const* row2row_within_tensor,
uint32_t max_sequence_length,
uint32_t nrows_tensor_token_ids,
uint32_t stride,
bool do_truncate,
// output
uint32_t* final_tensor,
uint32_t* attn_mask,
uint32_t* metadata)
{
uint32_t const output_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (output_idx >= (nrows_tensor_token_ids * max_sequence_length)) return;
uint32_t const absolute_row_id = output_idx / max_sequence_length;
uint32_t const tensor_id = row2tensor[absolute_row_id];
uint32_t const row_within_tensor = row2row_within_tensor[absolute_row_id];
uint32_t const offset_token_ids_tensor = offsets[tensor_id];
uint32_t const n_tokens_tensor = offsets[tensor_id + 1] - offset_token_ids_tensor;
// check for last row within tensor
bool const last_row_of_tensor = (absolute_row_id == nrows_tensor_token_ids - 1) ||
(row2tensor[absolute_row_id + 1] != tensor_id);
// compute input offset to retrieve token ids
uint32_t const token_idx = output_idx % max_sequence_length;
uint32_t const row_offset_token_ids =
offset_token_ids_tensor + token_idx +
(row_within_tensor ? (max_sequence_length + (stride * (row_within_tensor - 1))) : 0);
if (row_within_tensor == 0) {
if (token_idx < n_tokens_tensor) {
// copy token ids
final_tensor[output_idx] = token_ids[row_offset_token_ids];
attn_mask[output_idx] = 1;
} else {
// pad with 0
final_tensor[output_idx] = 0;
attn_mask[output_idx] = 0;
}
} else {
uint32_t const n_replicates = max_sequence_length - stride;
if ((row_offset_token_ids - n_replicates) < (offset_token_ids_tensor + n_tokens_tensor)) {
// replicate elements from previous row or copy new tokens
final_tensor[output_idx] = token_ids[row_offset_token_ids - n_replicates];
attn_mask[output_idx] = 1;
} else {
// pad with 0
final_tensor[output_idx] = 0;
attn_mask[output_idx] = 0;
}
}
// write metadata
if (token_idx == 0) {
auto const metadata_idx = absolute_row_id * 3; // three metadata values per output row
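// Per-row metadata: [0] = index of the source string; [1] and [2] are the first and last
// token positions in this row attributed to it when overlapping rows are recombined
// (the shared region of length max_sequence_length - stride is split evenly between rows).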
metadata[metadata_idx] = tensor_id;
metadata[metadata_idx + 1] = (row_within_tensor == 0) ? 0 : (max_sequence_length - stride) / 2;
metadata[metadata_idx + 2] = [&] {
if (!last_row_of_tensor) return max_sequence_length - (max_sequence_length - stride) / 2 - 1;
if (n_tokens_tensor <= max_sequence_length) // we fit, all good
return (n_tokens_tensor > 0) ? (n_tokens_tensor - 1) : 0;
if (do_truncate) return (max_sequence_length - 1);
auto const final_row_value =
(max_sequence_length - stride) + (n_tokens_tensor - max_sequence_length) % stride;
return (final_row_value > 0) ? (final_row_value - 1) : 0;
}();
}
}
} // namespace
tokenizer_result subword_tokenize(cudf::strings_column_view const& strings,
hashed_vocabulary const& vocab_table,
uint32_t max_sequence_length,
uint32_t stride,
bool do_lower_case,
bool do_truncate,
uint32_t max_rows_tensor,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(stride <= max_sequence_length,
"stride must be less than or equal to max_sequence_length");
CUDF_EXPECTS(max_sequence_length * max_rows_tensor <
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()),
"max_sequence_length x max_rows_tensor is too large for cudf output column size");
auto const strings_count = strings.size();
if (strings_count == 0 || strings.chars_size() == 0)
return tokenizer_result{0,
max_sequence_length,
cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}),
cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}),
cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32})};
auto const offsets = strings.offsets();
auto const d_offsets = offsets.data<uint32_t>() + strings.offset();
auto const offset = cudf::detail::get_value<int32_t>(offsets, strings.offset(), stream);
auto const d_chars = strings.chars().data<char>() + offset;
// Create tokenizer
wordpiece_tokenizer tokenizer(
vocab_table, max_rows_tensor, max_sequence_length, stride, do_truncate, do_lower_case);
// Run tokenizer
auto const tokens = tokenizer.tokenize(d_chars, d_offsets, strings_count, stream);
// assign output components
uint32_t const* device_token_ids = tokens.first->data();
uint32_t const* device_offsets = tokens.second->data();
// Format output from tokenizer
// Each string can create 1 or more tensor entries.
// Compute the string-per-tensor offsets values by scanning
// over the number of tokens for each string.
rmm::device_uvector<uint32_t> offsets_per_tensor(strings_count + 1, stream);
auto d_offsets_per_tensor = offsets_per_tensor.data();
thrust::transform_exclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count + 1),
offsets_per_tensor.begin(),
[device_offsets, do_truncate, max_sequence_length, stride, strings_count] __device__(
cudf::size_type idx) {
uint32_t const num_tokens =
idx < strings_count ? device_offsets[idx + 1] - device_offsets[idx] : 0;
if (do_truncate || num_tokens <= max_sequence_length) return uint32_t{1};
return 1 + ((num_tokens - max_sequence_length + stride - 1) / stride);
},
uint32_t{0},
thrust::plus<uint32_t>());
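// Example: with max_sequence_length = 64 and stride = 48, a string of 112 tokens yields
// 1 + ceil((112 - 64) / 48) = 2 output rows; the exclusive scan turns these per-string
// row counts into starting row offsets.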
// last element is the total number of output rows
uint32_t const nrows_tensor_token_ids = offsets_per_tensor.element(strings_count, stream);
// compute global_row to tensor, and global_row to within_tensor_row correspondence
rmm::device_uvector<uint32_t> row2tensor(nrows_tensor_token_ids, stream);
auto d_row2tensor = row2tensor.data();
rmm::device_uvector<uint32_t> row2row_within_tensor(nrows_tensor_token_ids, stream);
auto d_row2row_within_tensor = row2row_within_tensor.data();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<uint32_t>(0),
strings_count,
[d_offsets_per_tensor, d_row2tensor, d_row2row_within_tensor] __device__(auto idx) {
uint32_t offset = d_offsets_per_tensor[idx];
uint32_t nrows = d_offsets_per_tensor[idx + 1] - offset;
for (uint32_t jdx = 0; jdx < nrows; ++jdx) {
d_row2tensor[jdx + offset] = idx;
d_row2row_within_tensor[jdx + offset] = jdx;
}
});
// create output data columns
auto tensor_token_ids = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32},
nrows_tensor_token_ids * max_sequence_length,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto tensor_attention_mask =
cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32},
nrows_tensor_token_ids * max_sequence_length,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto tensor_metadata = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32},
nrows_tensor_token_ids * 3,
cudf::mask_state::UNALLOCATED,
stream,
mr);
// compute final-tensor, mask, and metadata
constexpr int block_size = 256;
cudf::detail::grid_1d const grid{
static_cast<cudf::size_type>(nrows_tensor_token_ids * max_sequence_length), block_size};
hipLaunchKernelGGL(( kernel_compute_tensor_metadata), dim3(grid.num_blocks),
dim3(grid.num_threads_per_block),
0,
stream.value(),
device_token_ids,
device_offsets,
d_row2tensor,
d_row2row_within_tensor,
max_sequence_length,
nrows_tensor_token_ids,
stride,
do_truncate,
tensor_token_ids->mutable_view().data<uint32_t>(),
tensor_attention_mask->mutable_view().data<uint32_t>(),
tensor_metadata->mutable_view().data<uint32_t>());
return tokenizer_result{nrows_tensor_token_ids,
max_sequence_length,
std::move(tensor_token_ids),
std::move(tensor_attention_mask),
std::move(tensor_metadata)};
}
} // namespace detail
tokenizer_result subword_tokenize(cudf::strings_column_view const& strings,
hashed_vocabulary const& vocabulary_table,
uint32_t max_sequence_length,
uint32_t stride,
bool do_lower_case,
bool do_truncate,
uint32_t max_rows_tensor,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::subword_tokenize(strings,
vocabulary_table,
max_sequence_length,
stride,
do_lower_case,
do_truncate,
max_rows_tensor,
rmm::cuda_stream_default,
mr);
}
} // namespace nvtext
| 8bba1810736e9c1a16a2ea0890d0d866721e54d0.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/utilities/error.hpp>
#include <nvtext/detail/load_hash_file.hpp>
#include <nvtext/subword_tokenize.hpp>
#include <text/subword/detail/wordpiece_tokenizer.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Convert tokens and row2tensor map to final tensor data.
*
* @param[in] token_ids Tokens from tokenizer
* @param[in] offsets Offsets to each string's output row of tokens
 * @param[in] row2tensor Maps each output row to the index of its source string (tensor)
 * @param[in] row2row_within_tensor Maps each output row to its position among that string's rows
* @param[in] max_sequence_length Maximum number of tokens in a row
* @param[in] nrows_tensor_token_ids Total number of output tensor rows
* @param[in] stride Number of tokens in sub-rows
* @param[in] do_truncate True if tokens should not spill into sub-rows in the output
* @param[out] final_tensor Output vector of token-ids
* @param[out] attn_mask Identifies valid token id entries
* @param[out] metadata Additional data per row
*/
__global__ void kernel_compute_tensor_metadata(
// input
uint32_t const* token_ids,
uint32_t const* offsets,
uint32_t const* row2tensor,
uint32_t const* row2row_within_tensor,
uint32_t max_sequence_length,
uint32_t nrows_tensor_token_ids,
uint32_t stride,
bool do_truncate,
// output
uint32_t* final_tensor,
uint32_t* attn_mask,
uint32_t* metadata)
{
uint32_t const output_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (output_idx >= (nrows_tensor_token_ids * max_sequence_length)) return;
uint32_t const absolute_row_id = output_idx / max_sequence_length;
uint32_t const tensor_id = row2tensor[absolute_row_id];
uint32_t const row_within_tensor = row2row_within_tensor[absolute_row_id];
uint32_t const offset_token_ids_tensor = offsets[tensor_id];
uint32_t const n_tokens_tensor = offsets[tensor_id + 1] - offset_token_ids_tensor;
// check for last row within tensor
bool const last_row_of_tensor = (absolute_row_id == nrows_tensor_token_ids - 1) ||
(row2tensor[absolute_row_id + 1] != tensor_id);
// compute input offset to retrieve token ids
uint32_t const token_idx = output_idx % max_sequence_length;
uint32_t const row_offset_token_ids =
offset_token_ids_tensor + token_idx +
(row_within_tensor ? (max_sequence_length + (stride * (row_within_tensor - 1))) : 0);
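// Each output row is a window of max_sequence_length tokens starting at
// row_within_tensor * stride in the source string, e.g. with max_sequence_length = 64 and
// stride = 48, row 0 covers tokens [0, 64) and row 1 covers [48, 112), so consecutive rows
// overlap by max_sequence_length - stride = 16 tokens.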
if (row_within_tensor == 0) {
if (token_idx < n_tokens_tensor) {
// copy token ids
final_tensor[output_idx] = token_ids[row_offset_token_ids];
attn_mask[output_idx] = 1;
} else {
// pad with 0
final_tensor[output_idx] = 0;
attn_mask[output_idx] = 0;
}
} else {
uint32_t const n_replicates = max_sequence_length - stride;
if ((row_offset_token_ids - n_replicates) < (offset_token_ids_tensor + n_tokens_tensor)) {
// replicate elements from previous row or copy new tokens
final_tensor[output_idx] = token_ids[row_offset_token_ids - n_replicates];
attn_mask[output_idx] = 1;
} else {
// pad with 0
final_tensor[output_idx] = 0;
attn_mask[output_idx] = 0;
}
}
// write metadata
if (token_idx == 0) {
auto const metadata_idx = absolute_row_id * 3; // three metadata values per output row
metadata[metadata_idx] = tensor_id;
metadata[metadata_idx + 1] = (row_within_tensor == 0) ? 0 : (max_sequence_length - stride) / 2;
metadata[metadata_idx + 2] = [&] {
if (!last_row_of_tensor) return max_sequence_length - (max_sequence_length - stride) / 2 - 1;
if (n_tokens_tensor <= max_sequence_length) // we fit, all good
return (n_tokens_tensor > 0) ? (n_tokens_tensor - 1) : 0;
if (do_truncate) return (max_sequence_length - 1);
auto const final_row_value =
(max_sequence_length - stride) + (n_tokens_tensor - max_sequence_length) % stride;
return (final_row_value > 0) ? (final_row_value - 1) : 0;
}();
}
}
} // namespace
tokenizer_result subword_tokenize(cudf::strings_column_view const& strings,
hashed_vocabulary const& vocab_table,
uint32_t max_sequence_length,
uint32_t stride,
bool do_lower_case,
bool do_truncate,
uint32_t max_rows_tensor,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(stride <= max_sequence_length,
"stride must be less than or equal to max_sequence_length");
CUDF_EXPECTS(max_sequence_length * max_rows_tensor <
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()),
"max_sequence_length x max_rows_tensor is too large for cudf output column size");
auto const strings_count = strings.size();
if (strings_count == 0 || strings.chars_size() == 0)
return tokenizer_result{0,
max_sequence_length,
cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}),
cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}),
cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32})};
auto const offsets = strings.offsets();
auto const d_offsets = offsets.data<uint32_t>() + strings.offset();
auto const offset = cudf::detail::get_value<int32_t>(offsets, strings.offset(), stream);
auto const d_chars = strings.chars().data<char>() + offset;
// Create tokenizer
wordpiece_tokenizer tokenizer(
vocab_table, max_rows_tensor, max_sequence_length, stride, do_truncate, do_lower_case);
// Run tokenizer
auto const tokens = tokenizer.tokenize(d_chars, d_offsets, strings_count, stream);
// assign output components
uint32_t const* device_token_ids = tokens.first->data();
uint32_t const* device_offsets = tokens.second->data();
// Format output from tokenizer
// Each string can create 1 or more tensor entries.
// Compute the string-per-tensor offsets values by scanning
// over the number of tokens for each string.
rmm::device_uvector<uint32_t> offsets_per_tensor(strings_count + 1, stream);
auto d_offsets_per_tensor = offsets_per_tensor.data();
thrust::transform_exclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count + 1),
offsets_per_tensor.begin(),
[device_offsets, do_truncate, max_sequence_length, stride, strings_count] __device__(
cudf::size_type idx) {
uint32_t const num_tokens =
idx < strings_count ? device_offsets[idx + 1] - device_offsets[idx] : 0;
if (do_truncate || num_tokens <= max_sequence_length) return uint32_t{1};
return 1 + ((num_tokens - max_sequence_length + stride - 1) / stride);
},
uint32_t{0},
thrust::plus<uint32_t>());
// last element is the total number of output rows
uint32_t const nrows_tensor_token_ids = offsets_per_tensor.element(strings_count, stream);
// compute global_row to tensor, and global_row to within_tensor_row correspondence
rmm::device_uvector<uint32_t> row2tensor(nrows_tensor_token_ids, stream);
auto d_row2tensor = row2tensor.data();
rmm::device_uvector<uint32_t> row2row_within_tensor(nrows_tensor_token_ids, stream);
auto d_row2row_within_tensor = row2row_within_tensor.data();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<uint32_t>(0),
strings_count,
[d_offsets_per_tensor, d_row2tensor, d_row2row_within_tensor] __device__(auto idx) {
uint32_t offset = d_offsets_per_tensor[idx];
uint32_t nrows = d_offsets_per_tensor[idx + 1] - offset;
for (uint32_t jdx = 0; jdx < nrows; ++jdx) {
d_row2tensor[jdx + offset] = idx;
d_row2row_within_tensor[jdx + offset] = jdx;
}
});
// create output data columns
auto tensor_token_ids = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32},
nrows_tensor_token_ids * max_sequence_length,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto tensor_attention_mask =
cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32},
nrows_tensor_token_ids * max_sequence_length,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto tensor_metadata = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32},
nrows_tensor_token_ids * 3,
cudf::mask_state::UNALLOCATED,
stream,
mr);
// compute final-tensor, mask, and metadata
constexpr int block_size = 256;
cudf::detail::grid_1d const grid{
static_cast<cudf::size_type>(nrows_tensor_token_ids * max_sequence_length), block_size};
kernel_compute_tensor_metadata<<<grid.num_blocks,
grid.num_threads_per_block,
0,
stream.value()>>>(
device_token_ids,
device_offsets,
d_row2tensor,
d_row2row_within_tensor,
max_sequence_length,
nrows_tensor_token_ids,
stride,
do_truncate,
tensor_token_ids->mutable_view().data<uint32_t>(),
tensor_attention_mask->mutable_view().data<uint32_t>(),
tensor_metadata->mutable_view().data<uint32_t>());
return tokenizer_result{nrows_tensor_token_ids,
max_sequence_length,
std::move(tensor_token_ids),
std::move(tensor_attention_mask),
std::move(tensor_metadata)};
}
} // namespace detail
tokenizer_result subword_tokenize(cudf::strings_column_view const& strings,
hashed_vocabulary const& vocabulary_table,
uint32_t max_sequence_length,
uint32_t stride,
bool do_lower_case,
bool do_truncate,
uint32_t max_rows_tensor,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::subword_tokenize(strings,
vocabulary_table,
max_sequence_length,
stride,
do_lower_case,
do_truncate,
max_rows_tensor,
rmm::cuda_stream_default,
mr);
}
} // namespace nvtext
|
2582883acb37f6875462126193da58c691614198.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "VecAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((VecAdd), dim3(gridBlock), dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((VecAdd), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((VecAdd), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
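// Note: kernel launches are asynchronous, so without a hipDeviceSynchronize() before taking
// 'end' this interval largely measures launch/enqueue overhead rather than kernel execution time.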
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2582883acb37f6875462126193da58c691614198.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "VecAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
VecAdd<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
VecAdd<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
VecAdd<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
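// Note: kernel launches are asynchronous, so without a cudaDeviceSynchronize() before taking
// 'end' this interval largely measures launch/enqueue overhead rather than kernel execution time.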
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
59c6bff96becebda8ea18addd8d6c0ea63a110c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <cstdio>
#include <chrono>
#include <fstream>
#include <vector>
#include "Image.h"
#include "PPM.h"
#include "MorphableOperator.h"
int main (int argc, char **argv){
std::string path_input = "/home/user/myfolder/";
std::string path_results = path_input;
std::vector<std::string> filename = {"logitech_bill_clinton_bin.ppm", "apple_adam_bin.ppm", "micropro_wordstar_bin.ppm", "two_bytes_better_bin.ppm"};
std::vector<double> times[filename.size() * 6];
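// Note: this is a variable-length array of vectors (a compiler extension, not standard C++);
// only times[0] is ever used -- times->push_back(...) and times->begin() below both refer to
// the first element, which accumulates 6 timings per input file.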
Image_t *input_img, *img, *output;
std::string name;
std::chrono::high_resolution_clock::time_point t_start, t_end;
std::chrono::duration<double> time_span;
for(auto file = filename.begin(); file != filename.end(); ++file){
input_img = PPM_import((path_input + *file).c_str());
printf("\nLoaded %s (%dx%d) \n", file->c_str(), input_img->width, input_img->height);
// 0.0 -> BLACK ; 1.0 -> WHITE
StructElem* se = new DiamondShape_SE(3);
// Extract only the first channel.
float *input = input_img->data;
if(input_img->channels > 1) {
input = (float *) malloc(input_img->width * input_img->height * sizeof(float));
for (int r = 0; r < input_img->height; r += 1) {
for (int c = 0; c < input_img->width; c += 1) {
input[r * input_img->width + c] = input_img->data[r * input_img->width * 3 + c * 3];
}
}
}
img = Image_new(input_img->width, input_img->height, 1, input);
// ELABORATION STAGE
// EROSION
printf("Erosion...\t");
output = erosion(img, se, &time_span);
hipDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((*file +(std::string)"_eroded.ppm").c_str(), output);
Image_delete(output);
// DILATATION
printf("Dilatation...\t");
output = dilatation(img, se, &time_span);
hipDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"dilatated.ppm").c_str(), output);
Image_delete(output);
// OPENING
printf("Opening...\t");
output = opening(img, se, &time_span);
hipDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_opened.ppm").c_str(), output);
Image_delete(output);
// CLOSING
printf("Closing...\t");
output = closing(img, se, &time_span);
hipDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_closed.ppm").c_str(), output);
Image_delete(output);
// TOPHAT
printf("TOPHAT...\t");
output = topHat(img, se, &time_span);
hipDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_topHat.ppm").c_str(), output);
Image_delete(output);
// BOTTOMHAT
printf("BOTTOM HAT... \n");
output = bottomHat(img, se, &time_span);
hipDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_bottomHat.ppm").c_str(), output);
Image_delete(output);
free(input);
}
printf("Writing times on file...\n");
std::ofstream timings_file;
std::string fname = "timings_"+ std::to_string(TILE_WIDTH) + ".csv";
timings_file.open((path_results + fname).c_str());
auto it = times->begin();
for(auto file = filename.begin(); file != filename.end(); ++file){
timings_file << file->c_str() << "(TILE_WIDTH="<<TILE_WIDTH<<")"<< "\n";
timings_file << "EROSION;" << *it++ << "\n";
timings_file << "DILATATION;" <<*it++<<"\n";
timings_file << "OPENING;" << *it++ <<"\n";
timings_file << "CLOSING;" << *it++ <<"\n";
timings_file << "TOPHAT;" << *it++ <<"\n";
timings_file << "BOTTOMHAT;" << *it++ <<"\n";
}
timings_file.close();
printf("==== DONE ==== \n");
return 0;
}
| 59c6bff96becebda8ea18addd8d6c0ea63a110c7.cu | #include <cstdlib>
#include <cstdio>
#include <chrono>
#include <fstream>
#include <vector>
#include "Image.h"
#include "PPM.h"
#include "MorphableOperator.h"
int main (int argc, char **argv){
std::string path_input = "/home/user/myfolder/";
std::string path_results = path_input;
std::vector<std::string> filename = {"logitech_bill_clinton_bin.ppm", "apple_adam_bin.ppm", "micropro_wordstar_bin.ppm", "two_bytes_better_bin.ppm"};
std::vector<double> times[filename.size() * 6];
Image_t *input_img, *img, *output;
std::string name;
std::chrono::high_resolution_clock::time_point t_start, t_end;
std::chrono::duration<double> time_span;
for(auto file = filename.begin(); file != filename.end(); ++file){
input_img = PPM_import((path_input + *file).c_str());
printf("\nLoaded %s (%dx%d) \n", file->c_str(), input_img->width, input_img->height);
// 0.0 -> BLACK ; 1.0 -> WHITE
StructElem* se = new DiamondShape_SE(3);
// Extract only the first channel.
float *input = input_img->data;
if(input_img->channels > 1) {
input = (float *) malloc(input_img->width * input_img->height * sizeof(float));
for (int r = 0; r < input_img->height; r += 1) {
for (int c = 0; c < input_img->width; c += 1) {
input[r * input_img->width + c] = input_img->data[r * input_img->width * 3 + c * 3];
}
}
}
img = Image_new(input_img->width, input_img->height, 1, input);
// ELABORATION STAGE
// EROSION
printf("Erosion...\t");
output = erosion(img, se, &time_span);
cudaDeviceSynchronize();
times->push_back(time_span.count());
        PPM_export((path_results + *file +(std::string)"_eroded.ppm").c_str(), output);
Image_delete(output);
// DILATATION
printf("Dilatation...\t");
output = dilatation(img, se, &time_span);
cudaDeviceSynchronize();
times->push_back(time_span.count());
        PPM_export((path_results + *file +(std::string)"_dilatated.ppm").c_str(), output);
Image_delete(output);
// OPENING
printf("Opening...\t");
output = opening(img, se, &time_span);
cudaDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_opened.ppm").c_str(), output);
Image_delete(output);
// CLOSING
printf("Closing...\t");
output = closing(img, se, &time_span);
cudaDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_closed.ppm").c_str(), output);
Image_delete(output);
// TOPHAT
printf("TOPHAT...\t");
output = topHat(img, se, &time_span);
cudaDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_topHat.ppm").c_str(), output);
Image_delete(output);
// BOTTOMHAT
printf("BOTTOM HAT... \n");
output = bottomHat(img, se, &time_span);
cudaDeviceSynchronize();
times->push_back(time_span.count());
PPM_export((path_results + *file +(std::string)"_bottomHat.ppm").c_str(), output);
Image_delete(output);
        if (input_img->channels > 1) free(input); // only free the single-channel copy allocated above
}
printf("Writing times on file...\n");
std::ofstream timings_file;
std::string fname = "timings_"+ std::to_string(TILE_WIDTH) + ".csv";
timings_file.open((path_results + fname).c_str());
auto it = times->begin();
for(auto file = filename.begin(); file != filename.end(); ++file){
timings_file << file->c_str() << "(TILE_WIDTH="<<TILE_WIDTH<<")"<< "\n";
timings_file << "EROSION;" << *it++ << "\n";
timings_file << "DILATATION;" <<*it++<<"\n";
timings_file << "OPENING;" << *it++ <<"\n";
timings_file << "CLOSING;" << *it++ <<"\n";
timings_file << "TOPHAT;" << *it++ <<"\n";
timings_file << "BOTTOMHAT;" << *it++ <<"\n";
}
timings_file.close();
printf("==== DONE ==== \n");
return 0;
}
|
d71d8eea90765e44e0cb5df840725037d4bba104.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* cudakernels.c
*
* Created on: Nov 7, 2011
* Author: igkiou
*/
#include "cudakernels.h"
#include "usecuda.h"
#define IMIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define IMAX(X, Y) ((X) > (Y) ? (X) : (Y))
#define SIGN(X) ((X) > 0 ? (1) : (((X) < 0 ? (-(1)) : (0))))
#define ABS(X) ((X) > 0 ? (X) : (-(X)))
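// Soft-thresholding (shrinkage): X[i] <- SIGN(X[i]) * max(|X[i]| - tau, 0), applied element-wise.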
__global__ void cuSoftThreshold_sub(CUDOUBLE *X, CUDOUBLE tau, CUINT N) {
CUINT iterN = blockDim.x * blockIdx.x + threadIdx.x;
// if (iterN < N) {
// if ((threshTemp = ABS(X[iterN]) - tau) > 0) {
// X[iterN] = SIGN(X[iterN]) * threshTemp;
// } else {
// X[iterN] = 0;
// }
// }
	// guard the padding threads of the last block: they would otherwise write one past X[N - 1]
	if (iterN >= N) {
		return;
	}
CUDOUBLE threshTemp = ABS(X[iterN]) - tau;
(threshTemp > 0) ? (X[iterN] = SIGN(X[iterN]) * threshTemp) \
: (X[iterN] = 0);
}
void cuSoftThreshold(CUDOUBLE *X, CUDOUBLE tau, CUINT N) {
CUINT threadsPerBlock = 8;
CUINT blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( cuSoftThreshold_sub), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, X, tau, N);
}
| d71d8eea90765e44e0cb5df840725037d4bba104.cu | /*
* cudakernels.c
*
* Created on: Nov 7, 2011
* Author: igkiou
*/
#include "cudakernels.h"
#include "usecuda.h"
#define IMIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define IMAX(X, Y) ((X) > (Y) ? (X) : (Y))
#define SIGN(X) ((X) > 0 ? (1) : (((X) < 0 ? (-(1)) : (0))))
#define ABS(X) ((X) > 0 ? (X) : (-(X)))
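// Soft-thresholding (shrinkage): X[i] <- SIGN(X[i]) * max(|X[i]| - tau, 0), applied element-wise.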
__global__ void cuSoftThreshold_sub(CUDOUBLE *X, CUDOUBLE tau, CUINT N) {
CUINT iterN = blockDim.x * blockIdx.x + threadIdx.x;
// if (iterN < N) {
// if ((threshTemp = ABS(X[iterN]) - tau) > 0) {
// X[iterN] = SIGN(X[iterN]) * threshTemp;
// } else {
// X[iterN] = 0;
// }
// }
	// guard the padding threads of the last block: they would otherwise write one past X[N - 1]
	if (iterN >= N) {
		return;
	}
CUDOUBLE threshTemp = ABS(X[iterN]) - tau;
(threshTemp > 0) ? (X[iterN] = SIGN(X[iterN]) * threshTemp) \
: (X[iterN] = 0);
}
void cuSoftThreshold(CUDOUBLE *X, CUDOUBLE tau, CUINT N) {
CUINT threadsPerBlock = 8;
CUINT blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
cuSoftThreshold_sub<<<blocksPerGrid, threadsPerBlock>>>(X, tau, N);
}
|
e9b536e42bd8e7f661539c46e5be85c26dcf280f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "AminoAcid.h"
#include <iostream>
#include <vector>
#include <fstream>
#include <cmath>
#include <string>
#define _USE_MATH_DEFINES
#define BOLTZ_CONST .0019872041
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
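// sum4: for every window start i in [0, size-4], writes the sum of the 4 consecutive propensity values starting at i.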
__global__ void sum4(double** pd_props, int size, double** pd_sums) {
int i = threadIdx.x;
while (i<size-3) {
double sum = 0.0;
for (int j = 0; j<4; j++) {
			sum += *pd_props[i + j]; // accumulate the 4-element window starting at i
}
*pd_sums[i] = sum;
i++;
}
}
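// calcProp: turns each window sum (treated as an energy) into a Boltzmann factor exp(-E / (kB * T)).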
__global__ void calcProp(double** pd_sums, double** pd_finalProps, int temp) {
int i = threadIdx.x;
*pd_finalProps[i] = pow(M_E, -1*(*pd_sums[i])/(BOLTZ_CONST*temp));
}
int main(void) {
fstream dataFile;
dataFile.open("aminoAcidList.txt", ios::in);
thrust::host_vector<AminoAcid*> protein;
string line;
while (dataFile >> line) {
protein.push_back(new AminoAcid(line));
}
dataFile.close();
int temperature;
cout << "What is the temperature?" << endl;
cin >> temperature;
thrust::host_vector<double*> props(protein.size());
for (int i = 0; i<protein.size(); i++) {
*props[i] = protein[i]->getPropensity();
}
thrust::device_vector<double*> d_props;
double** pd_props = thrust::raw_pointer_cast(d_props.data());
thrust::device_vector<double*> d_sums;
double** pd_sums = thrust::raw_pointer_cast(d_sums.data());
thrust::device_vector<double*> d_finalProps;
double** pd_finalProps = thrust::raw_pointer_cast(d_finalProps.data());
int d_size;
int d_temp;
int tempSize = 5;
int *size = &tempSize;
*size = protein.size();
int propSize = 1;
for (int k = 2; k<protein.size()-2; k++) {
propSize += k;
}
hipMalloc((void **) &d_props, propSize*sizeof(double));
hipMalloc((void **) &d_sums, propSize*sizeof(double));
hipMalloc((void **) &d_size, sizeof(int));
hipMalloc((void **) &d_temp, sizeof(int));
hipMalloc((void **) &d_finalProps, propSize*sizeof(double));
thrust::host_vector<double*> sums(propSize);
hipMemcpy(&d_props, &props, protein.size()*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(&d_size, &size, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&d_temp, &temperature, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum4), dim3(1), dim3(propSize), 0, 0, pd_props, d_size, pd_sums);
hipLaunchKernelGGL(( calcProp), dim3(1), dim3(propSize), 0, 0, pd_sums, pd_finalProps, d_temp);
hipMemcpy(&props, &d_props, propSize*sizeof(double), hipMemcpyDeviceToHost);
for (int i =0; i<props.size(); i++) {
if (*props[i] > .05) {
cout << *props[i] << endl;
}
}
hipFree(&d_finalProps);
hipFree(&d_props);
hipFree(&d_sums);
hipFree(&d_size);
hipFree(&d_temp);
return 0;
}
| e9b536e42bd8e7f661539c46e5be85c26dcf280f.cu | #include <cuda.h>
#include "AminoAcid.h"
#include <iostream>
#include <vector>
#include <fstream>
#include <cmath>
#include <string>
#define _USE_MATH_DEFINES
#define BOLTZ_CONST .0019872041
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
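// sum4: for every window start i in [0, size-4], writes the sum of the 4 consecutive propensity values starting at i.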
__global__ void sum4(double** pd_props, int size, double** pd_sums) {
int i = threadIdx.x;
while (i<size-3) {
double sum = 0.0;
for (int j = 0; j<4; j++) {
			sum += *pd_props[i + j]; // accumulate the 4-element window starting at i
}
*pd_sums[i] = sum;
i++;
}
}
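// calcProp: turns each window sum (treated as an energy) into a Boltzmann factor exp(-E / (kB * T)).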
__global__ void calcProp(double** pd_sums, double** pd_finalProps, int temp) {
int i = threadIdx.x;
*pd_finalProps[i] = pow(M_E, -1*(*pd_sums[i])/(BOLTZ_CONST*temp));
}
int main(void) {
fstream dataFile;
dataFile.open("aminoAcidList.txt", ios::in);
thrust::host_vector<AminoAcid*> protein;
string line;
while (dataFile >> line) {
protein.push_back(new AminoAcid(line));
}
dataFile.close();
int temperature;
cout << "What is the temperature?" << endl;
cin >> temperature;
thrust::host_vector<double*> props(protein.size());
for (int i = 0; i<protein.size(); i++) {
*props[i] = protein[i]->getPropensity();
}
thrust::device_vector<double*> d_props;
double** pd_props = thrust::raw_pointer_cast(d_props.data());
thrust::device_vector<double*> d_sums;
double** pd_sums = thrust::raw_pointer_cast(d_sums.data());
thrust::device_vector<double*> d_finalProps;
double** pd_finalProps = thrust::raw_pointer_cast(d_finalProps.data());
int d_size;
int d_temp;
int tempSize = 5;
int *size = &tempSize;
*size = protein.size();
int propSize = 1;
for (int k = 2; k<protein.size()-2; k++) {
propSize += k;
}
cudaMalloc((void **) &d_props, propSize*sizeof(double));
cudaMalloc((void **) &d_sums, propSize*sizeof(double));
cudaMalloc((void **) &d_size, sizeof(int));
cudaMalloc((void **) &d_temp, sizeof(int));
cudaMalloc((void **) &d_finalProps, propSize*sizeof(double));
thrust::host_vector<double*> sums(propSize);
cudaMemcpy(&d_props, &props, protein.size()*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(&d_size, &size, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&d_temp, &temperature, sizeof(int), cudaMemcpyHostToDevice);
sum4<<<1, propSize>>>(pd_props, d_size, pd_sums);
calcProp<<<1, propSize>>>(pd_sums, pd_finalProps, d_temp);
cudaMemcpy(&props, &d_props, propSize*sizeof(double), cudaMemcpyDeviceToHost);
for (int i =0; i<props.size(); i++) {
if (*props[i] > .05) {
cout << *props[i] << endl;
}
}
cudaFree(&d_finalProps);
cudaFree(&d_props);
cudaFree(&d_sums);
cudaFree(&d_size);
cudaFree(&d_temp);
return 0;
}
|
44b789685bb7aadfbb8bf1ec0b6935db18f4de23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <benchmark/benchmark.h>
#include <bm_config.hpp>
#include <matazure/tensor>
using namespace matazure;
template <typename _ValueType>
void BM_host_zip_gold(benchmark::State &state) {
tensor<_ValueType, 1> ts0(state.range(0));
tensor<_ValueType, 1> ts1(ts0.shape());
tensor<_ValueType, 1> ts_re0(ts0.shape());
tensor<_ValueType, 1> ts_re1(ts0.shape());
while (state.KeepRunning()) {
for (int_t i = 0; i < ts0.size(); ++i) {
ts_re0[i] = ts0[i];
ts_re1[i] = ts1[i];
}
}
auto bytes_size = static_cast<size_t>(ts0.size()) * sizeof(_ValueType);
state.SetBytesProcessed(state.iterations() * bytes_size * 4);
}
template <typename _Tensor>
void BM_zip(benchmark::State &state) {
_Tensor ts0(state.range(0));
_Tensor ts1(ts0.shape());
_Tensor ts_re0(ts0.shape());
_Tensor ts_re1(ts0.shape());
while (state.KeepRunning()) {
auto ts_zip = zip(ts0, ts1);
auto ts_re_zip = zip(ts_re0, ts_re1);
copy(ts_zip, ts_re_zip);
}
auto bytes_size = static_cast<size_t>(ts0.size()) * sizeof(decltype(ts0[0]));
state.SetBytesProcessed(state.iterations() * bytes_size * 4);
}
//template <typename _ValueType>
//__global__ void zip_dim2_gold_kenel(_ValueType *p_dst, _ValueType *p1, _ValueType *p2, int_t count){
// for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
// p_dst[i] = p1[i] * p2[i];
// }
//}
//
//template <typename _ValueType>
//void BM_cu_zip_gold(benchmark::State& state) {
// cuda::tensor<_ValueType, 1> ts1(state.range(0));
// cuda::tensor<_ValueType, 1> ts2(state.range(0));
// fill(ts1, _ValueType(1));
// fill(ts2, _ValueType(1));
//
// while (state.KeepRunning()) {
// cuda::tensor<float, 1> ts_re(ts1.shape());
// cuda::execution_policy policy;
// cuda::assert_runtime_success(cuda::configure_grid(policy, tensor_operation_gold_kenel<_ValueType>));
// tensor_operation_gold_kenel<<< policy.grid_size(),
// policy.block_size(),
// policy.shared_mem_bytes(),
// policy.stream() >>>(ts_re.data(), ts1.data(), ts2.data(), ts_re.size());
// }
//
// auto bytes_size = static_cast<size_t>(ts1.size()) * sizeof(_ValueType);
// state.SetBytesProcessed(state.iterations() * 2 * bytes_size);
//}
//
//template <typename _ValueType>
//void BM_zip_operation(benchmark::State &state) {
// cuda::tensor<_ValueType, 1> ts1(state.range(0));
// cuda::tensor<_ValueType, 1> ts2(state.range(0));
// fill(ts1, _ValueType(1));
// fill(ts2, _ValueType(1));
//
// while (state.KeepRunning()) {
// auto tsf_re = (ts1 * ts2 / ts1 + ts2).persist();
// }
//
// auto bytes_size = static_cast<size_t>(ts1.size()) * sizeof(decltype(ts1[0]));
// state.SetBytesProcessed(2 * state.iterations() * bytes_size);
//}
BENCHMARK_TEMPLATE(BM_host_zip_gold, float)->UseRealTime()->Range(1 << 16, 1 << (bm_config::max_host_memory_exponent() - 2));
auto BM_host_zip_byte = BM_zip<tensor<float, 1>>;
BENCHMARK(BM_host_zip_byte)->UseRealTime()->Range(1 << 16, 1 << (bm_config::max_host_memory_exponent() - 2));
BENCHMARK_TEMPLATE(BM_host_zip_gold, float)->UseRealTime()->Range(1 << 16, 1 << (bm_config::max_host_memory_exponent() - 2));
| 44b789685bb7aadfbb8bf1ec0b6935db18f4de23.cu | #include <benchmark/benchmark.h>
#include <bm_config.hpp>
#include <matazure/tensor>
using namespace matazure;
template <typename _ValueType>
void BM_host_zip_gold(benchmark::State &state) {
tensor<_ValueType, 1> ts0(state.range(0));
tensor<_ValueType, 1> ts1(ts0.shape());
tensor<_ValueType, 1> ts_re0(ts0.shape());
tensor<_ValueType, 1> ts_re1(ts0.shape());
while (state.KeepRunning()) {
for (int_t i = 0; i < ts0.size(); ++i) {
ts_re0[i] = ts0[i];
ts_re1[i] = ts1[i];
}
}
auto bytes_size = static_cast<size_t>(ts0.size()) * sizeof(_ValueType);
state.SetBytesProcessed(state.iterations() * bytes_size * 4);
}
template <typename _Tensor>
void BM_zip(benchmark::State &state) {
_Tensor ts0(state.range(0));
_Tensor ts1(ts0.shape());
_Tensor ts_re0(ts0.shape());
_Tensor ts_re1(ts0.shape());
while (state.KeepRunning()) {
auto ts_zip = zip(ts0, ts1);
auto ts_re_zip = zip(ts_re0, ts_re1);
copy(ts_zip, ts_re_zip);
}
auto bytes_size = static_cast<size_t>(ts0.size()) * sizeof(decltype(ts0[0]));
state.SetBytesProcessed(state.iterations() * bytes_size * 4);
}
//template <typename _ValueType>
//__global__ void zip_dim2_gold_kenel(_ValueType *p_dst, _ValueType *p1, _ValueType *p2, int_t count){
// for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
// p_dst[i] = p1[i] * p2[i];
// }
//}
//
//template <typename _ValueType>
//void BM_cu_zip_gold(benchmark::State& state) {
// cuda::tensor<_ValueType, 1> ts1(state.range(0));
// cuda::tensor<_ValueType, 1> ts2(state.range(0));
// fill(ts1, _ValueType(1));
// fill(ts2, _ValueType(1));
//
// while (state.KeepRunning()) {
// cuda::tensor<float, 1> ts_re(ts1.shape());
// cuda::execution_policy policy;
// cuda::assert_runtime_success(cuda::configure_grid(policy, tensor_operation_gold_kenel<_ValueType>));
// tensor_operation_gold_kenel<<< policy.grid_size(),
// policy.block_size(),
// policy.shared_mem_bytes(),
// policy.stream() >>>(ts_re.data(), ts1.data(), ts2.data(), ts_re.size());
// }
//
// auto bytes_size = static_cast<size_t>(ts1.size()) * sizeof(_ValueType);
// state.SetBytesProcessed(state.iterations() * 2 * bytes_size);
//}
//
//template <typename _ValueType>
//void BM_zip_operation(benchmark::State &state) {
// cuda::tensor<_ValueType, 1> ts1(state.range(0));
// cuda::tensor<_ValueType, 1> ts2(state.range(0));
// fill(ts1, _ValueType(1));
// fill(ts2, _ValueType(1));
//
// while (state.KeepRunning()) {
// auto tsf_re = (ts1 * ts2 / ts1 + ts2).persist();
// }
//
// auto bytes_size = static_cast<size_t>(ts1.size()) * sizeof(decltype(ts1[0]));
// state.SetBytesProcessed(2 * state.iterations() * bytes_size);
//}
BENCHMARK_TEMPLATE(BM_host_zip_gold, float)->UseRealTime()->Range(1 << 16, 1 << (bm_config::max_host_memory_exponent() - 2));
auto BM_host_zip_byte = BM_zip<tensor<float, 1>>;
BENCHMARK(BM_host_zip_byte)->UseRealTime()->Range(1 << 16, 1 << (bm_config::max_host_memory_exponent() - 2));
BENCHMARK_TEMPLATE(BM_host_zip_gold, float)->UseRealTime()->Range(1 << 16, 1 << (bm_config::max_host_memory_exponent() - 2));
|
c4ff4737d9411076beee83a923c0778eb35adeb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_cooperative_groups.h>
using namespace cooperative_groups;
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
//////creating 2 blocks doing exactly the same thing? also create larger intervals between block, E.g., sm0 and sm 32? & remote/nearby address
void init_cpu_data(long long int* A, long long int size, double stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
#define stride 1024
///////////////262144 (2m), 4194304 (32m), 8388608 (64m),
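// page_visitor: only blocks 0 and 32 take part; each of their threads does one stride-spaced load from A1 and one store to B1, and thread 0 prints which SM the block landed on.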
__global__ void page_visitor(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long
//thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 8388608;
//double temp = warpid * 1 * 16 + (threadIdx.x % 16) * 1;
//double temp = (threadIdx.x % 32) * 1;
//double temp = (threadIdx.x) * 1;
//double temp = (threadIdx.x % 32) * 2 + warpid * 1;
//double temp = (threadIdx.x) * 512;
//double temp = (threadIdx.x % 32) * 1024 + warpid * 512;
//double temp = (threadIdx.x) * 512;
//double temp = (threadIdx.x % 32) * 2048 + warpid * 512;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 512;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 512 + blockIdx.x * 256;
//double temp = (threadIdx.x) * 1;
//double temp = (threadIdx.x % 32) * 16 + warpid * 1;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
long long int index = __double2ll_rd(temp);
long long int value1;
if(blockIdx.x == 0 || blockIdx.x == 32){
value1 = A1[index];
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
}
/*
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value1 = value1 + threadIdx.x;
}
*/
B1[index] = value1;
}
}
__global__ void page_visitor1(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long half
//thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 8388608;
//double temp = warpid * 1 * 16 + (threadIdx.x % 16) * 1;
double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
long long int index = __double2ll_rd(temp);
long long int value1;
if(blockIdx.x == 0){
value1 = A1[index];
B1[index] = value1;
}
}
__global__ void page_visitor2(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////mixed
//thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 8388608;
//double temp = warpid * 1 * 16 + (threadIdx.x % 16) * 1;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1;
double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
if(blockIdx.x == 0 || blockIdx.x == 32){
value1 = A1[index];
B1[index] = value1;
}
}
__global__ void page_visitor3(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////mixed half
thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 512;
long long int index = __double2ll_rd(temp);
long long int value1;
//if(blockIdx.x == 0 || blockIdx.x == 32){
if(blockIdx.x == 0){
value1 = A1[index];
B1[index] = value1;
}
}
__global__ void page_visitor4(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long pause
thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
long long int index = __double2ll_rd(temp);
long long int value1 = 7;
//if(blockIdx.x == 0 || blockIdx.x == 32){
if(blockIdx.x == 0){
value1 = A1[index];
}
if(blockIdx.x == 32){
long long int clock_offset = 0;
while (clock_offset < 65536){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
//value1 = value1 * 3;
asm("mul.lo.s64 %0, %1, 7;" : "=l"(value1) : "l"(value1));
asm("div.s64 %0, %1, 3;" : "=l"(value1) : "l"(value1));
}
value1 = A1[index];
}
if(blockIdx.x == 0 || blockIdx.x == 32){
B1[index] = value1;
}
}
__global__ void page_visitor5(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////mixed pause
thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 512;
long long int index = __double2ll_rd(temp);
long long int value1 = 7;
//if(blockIdx.x == 0 || blockIdx.x == 32){
if(blockIdx.x == 0){
value1 = A1[index];
}
if(blockIdx.x == 32){
long long int clock_offset = 0;
while (clock_offset < 65536){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
//value1 = value1 * 3;
asm("mul.lo.s64 %0, %1, 7;" : "=l"(value1) : "l"(value1));
asm("div.s64 %0, %1, 3;" : "=l"(value1) : "l"(value1));
}
value1 = A1[index];
}
if(blockIdx.x == 0 || blockIdx.x == 32){
B1[index] = value1;
}
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
//int peak_clk = 1;//kHz
//checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
//float clock_rate = (float) peak_clk;
//printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
        // This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
/*
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
*/
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
//printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
///*
//printf("############approach\n");
for(long long int time = 0; time <= 5; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 32; clock_count <= 32; clock_count = clock_count * 2){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
if(0){
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 56;
if(time == 0){
hipLaunchKernelGGL(( page_visitor), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long
}
if(time == 1){
hipLaunchKernelGGL(( page_visitor1), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long half
}
if(time == 2){
hipLaunchKernelGGL(( page_visitor2), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);//////mixed
}
if(time == 3){
hipLaunchKernelGGL(( page_visitor3), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);//////mixed half
}
if(time == 4){
hipLaunchKernelGGL(( page_visitor4), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long pause
}
if(time == 5){
hipLaunchKernelGGL(( page_visitor5), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);//////mixed long
}
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(hipFree(CPU_data_in1));
checkCudaErrors(hipFree(GPU_data_out1));
}
}
}
}
}
}
}
printf("\n");
//*/
exit(EXIT_SUCCESS);
} | c4ff4737d9411076beee83a923c0778eb35adeb4.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
#include <cooperative_groups.h>
using namespace cooperative_groups;
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
//////creating 2 blocks doing exactly the same thing? also create larger intervals between block, E.g., sm0 and sm 32? & remote/nearby address
void init_cpu_data(long long int* A, long long int size, double stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
#define stride 1024
///////////////262144 (2m), 4194304 (32m), 8388608 (64m),
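// page_visitor: only blocks 0 and 32 take part; each of their threads does one stride-spaced load from A1 and one store to B1, and thread 0 prints which SM the block landed on.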
__global__ void page_visitor(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long
//thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 8388608;
//double temp = warpid * 1 * 16 + (threadIdx.x % 16) * 1;
//double temp = (threadIdx.x % 32) * 1;
//double temp = (threadIdx.x) * 1;
//double temp = (threadIdx.x % 32) * 2 + warpid * 1;
//double temp = (threadIdx.x) * 512;
//double temp = (threadIdx.x % 32) * 1024 + warpid * 512;
//double temp = (threadIdx.x) * 512;
//double temp = (threadIdx.x % 32) * 2048 + warpid * 512;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 512;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 512 + blockIdx.x * 256;
//double temp = (threadIdx.x) * 1;
//double temp = (threadIdx.x % 32) * 16 + warpid * 1;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
long long int index = __double2ll_rd(temp);
long long int value1;
if(blockIdx.x == 0 || blockIdx.x == 32){
value1 = A1[index];
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
}
/*
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value1 = value1 + threadIdx.x;
}
*/
B1[index] = value1;
}
}
__global__ void page_visitor1(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long half
//thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 8388608;
//double temp = warpid * 1 * 16 + (threadIdx.x % 16) * 1;
double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
long long int index = __double2ll_rd(temp);
long long int value1;
if(blockIdx.x == 0){
value1 = A1[index];
B1[index] = value1;
}
}
__global__ void page_visitor2(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////mixed
//thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 8388608;
//double temp = warpid * 1 * 16 + (threadIdx.x % 16) * 1;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1;
double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
if(blockIdx.x == 0 || blockIdx.x == 32){
value1 = A1[index];
B1[index] = value1;
}
}
__global__ void page_visitor3(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////mixed half
thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 512;
long long int index = __double2ll_rd(temp);
long long int value1;
//if(blockIdx.x == 0 || blockIdx.x == 32){
if(blockIdx.x == 0){
value1 = A1[index];
B1[index] = value1;
}
}
__global__ void page_visitor4(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long pause
thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
long long int index = __double2ll_rd(temp);
long long int value1 = 7;
//if(blockIdx.x == 0 || blockIdx.x == 32){
if(blockIdx.x == 0){
value1 = A1[index];
}
if(blockIdx.x == 32){
long long int clock_offset = 0;
while (clock_offset < 65536){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
//value1 = value1 * 3;
asm("mul.lo.s64 %0, %1, 7;" : "=l"(value1) : "l"(value1));
asm("div.s64 %0, %1, 3;" : "=l"(value1) : "l"(value1));
}
value1 = A1[index];
}
if(blockIdx.x == 0 || blockIdx.x == 32){
B1[index] = value1;
}
}
__global__ void page_visitor5(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////mixed pause
thread_block block = this_thread_block();
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1;
unsigned warpid;
asm("mov.u32 %0, %warpid;" : "=r"(warpid));
double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * gridDim.x + blockIdx.x * stride;
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 512;
long long int index = __double2ll_rd(temp);
long long int value1 = 7;
//if(blockIdx.x == 0 || blockIdx.x == 32){
if(blockIdx.x == 0){
value1 = A1[index];
}
if(blockIdx.x == 32){
long long int clock_offset = 0;
while (clock_offset < 65536){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
//value1 = value1 * 3;
asm("mul.lo.s64 %0, %1, 7;" : "=l"(value1) : "l"(value1));
asm("div.s64 %0, %1, 3;" : "=l"(value1) : "l"(value1));
}
value1 = A1[index];
}
if(blockIdx.x == 0 || blockIdx.x == 32){
B1[index] = value1;
}
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
//int peak_clk = 1;//kHz
//checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
//float clock_rate = (float) peak_clk;
//printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
        // This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
/*
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
*/
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
//printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
///*
//printf("############approach\n");
for(long long int time = 0; time <= 5; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 32; clock_count <= 32; clock_count = clock_count * 2){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
if(0){
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 56;
if(time == 0){
page_visitor<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long
}
if(time == 1){
page_visitor1<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long half
}
if(time == 2){
page_visitor2<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);//////mixed
}
if(time == 3){
page_visitor3<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);//////mixed half
}
if(time == 4){
page_visitor4<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long pause
}
if(time == 5){
page_visitor5<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);//////mixed long
}
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(cudaFree(CPU_data_in1));
checkCudaErrors(cudaFree(GPU_data_out1));
}
}
}
}
}
}
}
printf("\n");
//*/
exit(EXIT_SUCCESS);
} |
e63f798093d24772beb4282f2ae888103d39adf8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmark/benchmark.h>
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <tests/utilities/column_wrapper.hpp>
// to enable, run cmake with -DBUILD_BENCHMARKS=ON
class ContiguousSplit: public cudf::benchmark {};
void BM_contiguous_split(benchmark::State& state)
{
int64_t total_desired_bytes = state.range(0);
cudf::size_type num_cols = state.range(1);
cudf::size_type num_splits = state.range(2);
bool include_validity = state.range(3) == 0 ? false : true;
cudf::size_type el_size = 4; // ints and floats
int64_t num_rows = total_desired_bytes / (num_cols * el_size);
// generate splits
cudf::size_type split_stride = num_rows / num_splits;
std::vector<cudf::size_type> splits;
for(int idx=0; idx<num_rows; idx+=split_stride){
      splits.push_back(std::min(idx + split_stride, static_cast<cudf::size_type>(num_rows)));
}
// generate input table
srand(31337);
auto valids = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; });
std::vector<cudf::test::fixed_width_column_wrapper<int>> src_cols(num_cols);
for(int idx=0; idx<num_cols; idx++){
auto rand_elements = cudf::test::make_counting_transform_iterator(0, [](int i){return rand();});
if(include_validity){
src_cols[idx] = cudf::test::fixed_width_column_wrapper<int>(rand_elements, rand_elements + num_rows, valids);
} else {
src_cols[idx] = cudf::test::fixed_width_column_wrapper<int>(rand_elements, rand_elements + num_rows);
}
}
std::vector<std::unique_ptr<cudf::column>> columns(num_cols);
std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](cudf::test::fixed_width_column_wrapper<int> &in){
auto ret = in.release();
ret->set_null_count(0);
return ret;
});
cudf::experimental::table src_table(std::move(columns));
for(auto _ : state){
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
auto result = cudf::experimental::contiguous_split(src_table, splits);
}
state.SetBytesProcessed(
static_cast<int64_t>(state.iterations())*state.range(0));
}
#define CSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplit, name)(::benchmark::State& state) { \
BM_contiguous_split(state); \
} \
BENCHMARK_REGISTER_F(ContiguousSplit, name)->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond)->UseManualTime() \
->Iterations(1);
CSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1); | e63f798093d24772beb4282f2ae888103d39adf8.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmark/benchmark.h>
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <tests/utilities/column_wrapper.hpp>
// to enable, run cmake with -DBUILD_BENCHMARKS=ON
class ContiguousSplit: public cudf::benchmark {};
void BM_contiguous_split(benchmark::State& state)
{
int64_t total_desired_bytes = state.range(0);
cudf::size_type num_cols = state.range(1);
cudf::size_type num_splits = state.range(2);
bool include_validity = state.range(3) == 0 ? false : true;
cudf::size_type el_size = 4; // ints and floats
int64_t num_rows = total_desired_bytes / (num_cols * el_size);
// generate splits
cudf::size_type split_stride = num_rows / num_splits;
std::vector<cudf::size_type> splits;
for(int idx=0; idx<num_rows; idx+=split_stride){
splits.push_back(std::min(idx + split_stride, static_cast<cudf::size_type>(num_rows)));
}
// generate input table
srand(31337);
auto valids = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; });
std::vector<cudf::test::fixed_width_column_wrapper<int>> src_cols(num_cols);
for(int idx=0; idx<num_cols; idx++){
auto rand_elements = cudf::test::make_counting_transform_iterator(0, [](int i){return rand();});
if(include_validity){
src_cols[idx] = cudf::test::fixed_width_column_wrapper<int>(rand_elements, rand_elements + num_rows, valids);
} else {
src_cols[idx] = cudf::test::fixed_width_column_wrapper<int>(rand_elements, rand_elements + num_rows);
}
}
std::vector<std::unique_ptr<cudf::column>> columns(num_cols);
std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](cudf::test::fixed_width_column_wrapper<int> &in){
auto ret = in.release();
ret->set_null_count(0);
return ret;
});
cudf::experimental::table src_table(std::move(columns));
for(auto _ : state){
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
auto result = cudf::experimental::contiguous_split(src_table, splits);
}
state.SetBytesProcessed(
static_cast<int64_t>(state.iterations())*state.range(0));
}
#define CSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplit, name)(::benchmark::State& state) { \
BM_contiguous_split(state); \
} \
BENCHMARK_REGISTER_F(ContiguousSplit, name)->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond)->UseManualTime() \
->Iterations(1);
CSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1); |
86641a60faf018a91e3106306ffc0a268bc3cce5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "project.h"
#include "volume.h"
#include "graph.h"
char *programName;
int main(int argc, char *argv[])
{
programName = argv[0];
if (argc < 2)
{
printf("Usage:\n");
printf(" %s inputRawVolume outputRawVolume\n", programName);
exit(0);
}
// read our raw file into volume
Volume *volume;
readRaw(&volume, argv[1]);
printf("read volume from raw file. dimensions = (%d,%d,%d)\n", volume->width, volume->height, volume->depth);
Graph *graph;
cudaMallocGraph(&graph, volume->width, volume->height, volume->depth);
//printGraph(graph);
pushRelabel(graph, volume);
//printGraph(graph);
if (isMaxFlow(graph))
{
printf("flow verification succeeded: we found the max flow\n");
} else
{
printf("flow verification failed\n");
}
hipFree(volume->data);
// resize volume data so we only have to store 1 byte per voxel
volume->bytesPerPixel = 1;
cudaMallocManagedVolume(volume);
hipLaunchKernelGGL(( storeSegmentation), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, graph, volume);
hipDeviceSynchronize();
writeRaw(volume, argv[2]);
printf("wrote segmentation to %s\n", argv[2]);
hipFree(volume->data);
hipFree(volume);
hipFree(&(graph->voxels));
hipFree(graph);
}
| 86641a60faf018a91e3106306ffc0a268bc3cce5.cu | #include "project.h"
#include "volume.h"
#include "graph.h"
char *programName;
int main(int argc, char *argv[])
{
programName = argv[0];
if (argc < 2)
{
printf("Usage:\n");
printf(" %s inputRawVolume outputRawVolume\n", programName);
exit(0);
}
// read our raw file into volume
Volume *volume;
readRaw(&volume, argv[1]);
printf("read volume from raw file. dimensions = (%d,%d,%d)\n", volume->width, volume->height, volume->depth);
Graph *graph;
cudaMallocGraph(&graph, volume->width, volume->height, volume->depth);
//printGraph(graph);
pushRelabel(graph, volume);
//printGraph(graph);
if (isMaxFlow(graph))
{
printf("flow verification succeeded: we found the max flow\n");
} else
{
printf("flow verification failed\n");
}
cudaFree(volume->data);
// resize volume data so we only have to store 1 byte per voxel
volume->bytesPerPixel = 1;
cudaMallocManagedVolume(volume);
storeSegmentation<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(graph, volume);
cudaDeviceSynchronize();
writeRaw(volume, argv[2]);
printf("wrote segmentation to %s\n", argv[2]);
cudaFree(volume->data);
cudaFree(volume);
cudaFree(&(graph->voxels));
cudaFree(graph);
}
|
61aff8a438dc0e595018493fefebd8ab92e0b898.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include "CudaProj.h"
#include <stdlib.h>
#include <stdio.h>
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and intialization
float* ReadFile(const char* name, int* count) {
FILE *file;
float *buffer;
unsigned long fileLen;
//Open file
file = fopen(name, "rb");
if (!file) {
fprintf(stderr, "Unable to open file %s", name);
exit(1);
}
//Get file length
fseek(file, 0, SEEK_END);
fileLen = ftell(file);
fseek(file, 0, SEEK_SET);
//Allocate memory
buffer = (float *) malloc(fileLen + 1);
if (!buffer) {
fprintf(stderr, "Memory error!");
fclose(file);
exit(2);
}
//Read file contents into buffer
char line[1024];
int _count = 0;
float testMin = INFINITY;
while (fgets(line, 1024, file)) {
sscanf(line, "%*s %f", &buffer[_count]);
//TODO remove below line
buffer[_count] = _count;
if (buffer[_count] < testMin) {
testMin = buffer[_count];
}
_count++;
}
	_count = (_count / AGG_TEST_108) * AGG_TEST_108; //FIXME alignment: round _count down to a multiple of AGG_TEST_108
printf("\n~~~~~~~~~~~~~~ TEST MIN: %f ~~~~~~~~~~~~~~\n", testMin);
float* goodArray = (float*) malloc(sizeof(float) * _count);
memcpy(goodArray, buffer, _count * sizeof(float));
*count = _count;
fclose(file);
free(buffer);
//Do what ever with buffer
return goodArray;
}
float* getMockData(const int count) {
float* buffer = (float *) malloc(count*sizeof(float));
for (int i = 0; i < count; i++) {
buffer[i] = i;
}
return buffer;
}
void testIO() {
int count;
float* buffer = ReadFile("data/Osoba_concat.txt", &count);
printf("Lines loaded: %d\n", count);
}
int initCuda(int argc, char ** argv) {
hipDeviceProp_t deviceProp;
int devID = 0;
if (argc >= 6) {
devID = atoi(argv[5]);
}
printf("\n====================\nworking on device ID: %d\n====================\n", devID);
hipSetDevice(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11) {
printf("%s: requires a minimum CUDA compute 1.1 capability\n", "Adam Borowski");
hipDeviceReset();
exit(EXIT_SUCCESS);
}
return devID;
}
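// mclock: wall-clock time in seconds (gettimeofday, microsecond resolution)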
double mclock() {
struct timeval tp;
double sec, usec;
gettimeofday(&tp, NULL);
sec = double(tp.tv_sec);
usec = double(tp.tv_usec) / 1E6;
return sec + usec;
}
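// GPU event timer: createTimer() records a start event, tickTimer() returns the elapsed time in milliseconds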
Timer createTimer() {
Timer timer;
hipEventCreate(&timer.startEvent);
hipEventCreate(&timer.stopEvent);
hipEventRecord(timer.startEvent, 0);
timer.duration = 0;
return timer;
}
float tickTimer(Timer* timer) {
hipEventRecord(timer->stopEvent, 0);
hipEventSynchronize(timer->stopEvent);
hipEventElapsedTime(&timer->duration, timer->startEvent, timer->stopEvent);
return timer->duration;
}
| 61aff8a438dc0e595018493fefebd8ab92e0b898.cu | #include "utils.h"
#include "CudaProj.h"
#include <stdlib.h>
#include <stdio.h>
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and intialization
float* ReadFile(const char* name, int* count) {
FILE *file;
float *buffer;
unsigned long fileLen;
//Open file
file = fopen(name, "rb");
if (!file) {
fprintf(stderr, "Unable to open file %s", name);
exit(1);
}
//Get file length
fseek(file, 0, SEEK_END);
fileLen = ftell(file);
fseek(file, 0, SEEK_SET);
//Allocate memory
buffer = (float *) malloc(fileLen + 1);
if (!buffer) {
fprintf(stderr, "Memory error!");
fclose(file);
exit(2);
}
//Read file contents into buffer
char line[1024];
int _count = 0;
float testMin = INFINITY;
while (fgets(line, 1024, file)) {
sscanf(line, "%*s %f", &buffer[_count]);
//TODO remove below line
buffer[_count] = _count;
if (buffer[_count] < testMin) {
testMin = buffer[_count];
}
_count++;
}
_count = (_count / AGG_TEST_108) * AGG_TEST_108; //FIXME alignment: round _count down to a multiple of AGG_TEST_108
printf("\n~~~~~~~~~~~~~~ TEST MIN: %f ~~~~~~~~~~~~~~\n", testMin);
float* goodArray = (float*) malloc(sizeof(float) * _count);
memcpy(goodArray, buffer, _count * sizeof(float));
*count = _count;
fclose(file);
free(buffer);
//Do what ever with buffer
return goodArray;
}
float* getMockData(const int count) {
float* buffer = (float *) malloc(count*sizeof(float));
for (int i = 0; i < count; i++) {
buffer[i] = i;
}
return buffer;
}
void testIO() {
int count;
float* buffer = ReadFile("data/Osoba_concat.txt", &count);
printf("Lines loaded: %d\n", count);
}
int initCuda(int argc, char ** argv) {
cudaDeviceProp deviceProp;
int devID = 0;
if (argc >= 6) {
devID = atoi(argv[5]);
}
printf("\n====================\nworking on device ID: %d\n====================\n", devID);
cudaSetDevice(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11) {
printf("%s: requires a minimum CUDA compute 1.1 capability\n", "Adam Borowski");
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
return devID;
}
double mclock() {
struct timeval tp;
double sec, usec;
gettimeofday(&tp, NULL);
sec = double(tp.tv_sec);
usec = double(tp.tv_usec) / 1E6;
return sec + usec;
}
Timer createTimer() {
Timer timer;
cudaEventCreate(&timer.startEvent);
cudaEventCreate(&timer.stopEvent);
cudaEventRecord(timer.startEvent, 0);
timer.duration = 0;
return timer;
}
float tickTimer(Timer* timer) {
cudaEventRecord(timer->stopEvent, 0);
cudaEventSynchronize(timer->stopEvent);
cudaEventElapsedTime(&timer->duration, timer->startEvent, timer->stopEvent);
return timer->duration;
}
|
410da2908e2614eaab3a3bbb5646c2a51d1a2443.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "cuda_util.h"
__host__ void
cuda_check(hipError_t err, const char* file, const int line, bool fatal) {
if (hipSuccess != err) {
printf("[!] GPU error: %s:%d, code: %d, reason: %s\n",
file, line, err, hipGetErrorString(err));
if (fatal) {
printf("[!] aborting...\n");
exit(err);
}
}
}
__host__ void
check_gpu(const hipDeviceProp_t* const dev_prop) {
printf("[+] CUDA info:\n");
int cuda_rtv;
CUDA_CHECK(hipRuntimeGetVersion(&cuda_rtv));
printf("\t\tRuntime version: %d.%d\n", cuda_rtv / 1000,
(cuda_rtv % 100) / 10);
printf("\t\tCapability: %d.%d\n", dev_prop->major, dev_prop->minor);
printf("\t\tGlobal memory size: %d Mbytes\n", dev_prop->totalGlobalMem / (1024*1024));
printf("\t\tGPU clock rate: %0.2f MHz\n", dev_prop->clockRate / 1000.0f);
printf("\t\tMemory clock rate: %0.2f MHz\n",
dev_prop->memoryClockRate / 1000.0f);
printf("\t\tMemory bus width: %d bits\n", dev_prop->memoryBusWidth);
printf("\t\tMax memory pitch: %d bytes\n",
dev_prop->memPitch);
printf("\t\tL1 support global cache? %s\n",
dev_prop->globalL1CacheSupported ? "yes" : "no");
printf("\t\tL1 support local cache? %s\n",
dev_prop->localL1CacheSupported ? "yes" : "no");
printf("\t\tL2 cache size: %d bytes\n", dev_prop->l2CacheSize);
printf("\t\tConstant memory size: %lu Mbytes\n", dev_prop->totalConstMem / (1024*1024));
printf("\t\tShared memory size per block: %lu bytes\n",
dev_prop->sharedMemPerBlock);
printf("\t\tNumber of registers available per block: %d\n",
dev_prop->regsPerBlock);
printf("\t\tMax number of threads per block: %d\n",
dev_prop->maxThreadsPerBlock);
printf("\t\tNumber of registers available per thread: %d\n",
dev_prop->regsPerBlock / dev_prop->maxThreadsPerBlock);
printf("\t\tWarp size: %d\n", dev_prop->warpSize);
printf("\t\tMax number of threads per multiprocessor: %d\n",
dev_prop->maxThreadsPerMultiProcessor);
printf("\t\tNumber of multiprocessors: %d\n", dev_prop->multiProcessorCount);
printf("\t\tMax sizes of each dimension of a block: (%d x %d x %d)\n",
dev_prop->maxThreadsDim[0],
dev_prop->maxThreadsDim[1],
dev_prop->maxThreadsDim[2]);
printf("\t\tMax sizes of each dimension of a grid: (%d x %d x %d)\n",
dev_prop->maxGridSize[0],
dev_prop->maxGridSize[1],
dev_prop->maxGridSize[2]);
printf("\t\tConcurrent copy and execution? %s\n",
dev_prop->deviceOverlap ? "yes" : "no");
printf("\t\tLaunch concurrent kernels? %s\n",
dev_prop->concurrentKernels ? "yes" : "no");
//printf("\t\tSingle/Double precision performance ratio: %d\n",
// dev_prop->singleToDoublePrecisionPerfRatio);
printf("\t\tNumber of asynchronous engines: %d\n",
dev_prop->asyncEngineCount);
//printf("\t\tNative atomic operations between host and device?: %s\n",
// dev_prop->hostNativeAtomicSupported ? "yes" : "no");
printf("[+] end CUDA info\n");
}
}
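/* Usage sketch (illustrative only): cuda_check() is normally reached through the
 * CUDA_CHECK macro used above for hipRuntimeGetVersion(); the same pattern can guard
 * any runtime call, for example an allocation. */
static void example_checked_alloc(void** buf, size_t nbytes) {
	CUDA_CHECK(hipMalloc(buf, nbytes)); /* logs file/line on error; aborts when the check is fatal */
}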
| 410da2908e2614eaab3a3bbb5646c2a51d1a2443.cu | extern "C" {
#include "cuda_util.h"
__host__ void
cuda_check(cudaError_t err, const char* file, const int line, bool fatal) {
if (cudaSuccess != err) {
printf("[!] GPU error: %s:%d, code: %d, reason: %s\n",
file, line, err, cudaGetErrorString(err));
if (fatal) {
printf("[!] aborting...\n");
exit(err);
}
}
}
__host__ void
check_gpu(const cudaDeviceProp* const dev_prop) {
printf("[+] CUDA info:\n");
int cuda_rtv;
CUDA_CHECK(cudaRuntimeGetVersion(&cuda_rtv));
printf("\t\tRuntime version: %d.%d\n", cuda_rtv / 1000,
(cuda_rtv % 100) / 10);
printf("\t\tCapability: %d.%d\n", dev_prop->major, dev_prop->minor);
printf("\t\tGlobal memory size: %d Mbytes\n", dev_prop->totalGlobalMem / (1024*1024));
printf("\t\tGPU clock rate: %0.2f MHz\n", dev_prop->clockRate / 1000.0f);
printf("\t\tMemory clock rate: %0.2f MHz\n",
dev_prop->memoryClockRate / 1000.0f);
printf("\t\tMemory bus width: %d bits\n", dev_prop->memoryBusWidth);
printf("\t\tMax memory pitch: %d bytes\n",
dev_prop->memPitch);
printf("\t\tL1 support global cache? %s\n",
dev_prop->globalL1CacheSupported ? "yes" : "no");
printf("\t\tL1 support local cache? %s\n",
dev_prop->localL1CacheSupported ? "yes" : "no");
printf("\t\tL2 cache size: %d bytes\n", dev_prop->l2CacheSize);
printf("\t\tConstant memory size: %lu Mbytes\n", dev_prop->totalConstMem / (1024*1024));
printf("\t\tShared memory size per block: %lu bytes\n",
dev_prop->sharedMemPerBlock);
printf("\t\tNumber of registers available per block: %d\n",
dev_prop->regsPerBlock);
printf("\t\tMax number of threads per block: %d\n",
dev_prop->maxThreadsPerBlock);
printf("\t\tNumber of registers available per thread: %d\n",
dev_prop->regsPerBlock / dev_prop->maxThreadsPerBlock);
printf("\t\tWarp size: %d\n", dev_prop->warpSize);
printf("\t\tMax number of threads per multiprocessor: %d\n",
dev_prop->maxThreadsPerMultiProcessor);
printf("\t\tNumber of multiprocessors: %d\n", dev_prop->multiProcessorCount);
printf("\t\tMax sizes of each dimension of a block: (%d x %d x %d)\n",
dev_prop->maxThreadsDim[0],
dev_prop->maxThreadsDim[1],
dev_prop->maxThreadsDim[2]);
printf("\t\tMax sizes of each dimension of a grid: (%d x %d x %d)\n",
dev_prop->maxGridSize[0],
dev_prop->maxGridSize[1],
dev_prop->maxGridSize[2]);
printf("\t\tConcurrent copy and execution? %s\n",
dev_prop->deviceOverlap ? "yes" : "no");
printf("\t\tLaunch concurrent kernels? %s\n",
dev_prop->concurrentKernels ? "yes" : "no");
//printf("\t\tSingle/Double precision performance ratio: %d\n",
// dev_prop->singleToDoublePrecisionPerfRatio);
printf("\t\tNumber of asynchronous engines: %d\n",
dev_prop->asyncEngineCount);
//printf("\t\tNative atomic operations between host and device?: %s\n",
// dev_prop->hostNativeAtomicSupported ? "yes" : "no");
printf("[+] end CUDA info\n");
}
}
|
285909960cb1f8ed8753ec0cbb1bc8d35d9f8bd7.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) SenseTime Research. All rights reserved.
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
int c = a / b;
if (c * b > a) {
c--;
}
return c;
}
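// Note: floor_div() rounds toward negative infinity for negative numerators (the
// divisors used here, up_x/up_y, are positive), e.g. floor_div(-3, 2) == -2 while
// plain integer division truncates to -1; the index arithmetic below relies on this
// when padded coordinates go negative.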
struct UpFirDn2DKernelParams {
int up_x;
int up_y;
int down_x;
int down_y;
int pad_x0;
int pad_x1;
int pad_y0;
int pad_y1;
int major_dim;
int in_h;
int in_w;
int minor_dim;
int kernel_h;
int kernel_w;
int out_h;
int out_w;
int loop_major;
int loop_x;
};
template <typename scalar_t>
__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
int minor_idx = blockIdx.x * blockDim.x + threadIdx.x;
int out_y = minor_idx / p.minor_dim;
minor_idx -= out_y * p.minor_dim;
int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y;
int major_idx_base = blockIdx.z * p.loop_major;
if (out_x_base >= p.out_w || out_y >= p.out_h ||
major_idx_base >= p.major_dim) {
return;
}
int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0;
int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h);
int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y;
int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y;
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major && major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, out_x = out_x_base;
loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) {
int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0;
int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w);
int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x;
int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x;
const scalar_t *x_p =
&input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim +
minor_idx];
const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x];
int x_px = p.minor_dim;
int k_px = -p.up_x;
int x_py = p.in_w * p.minor_dim;
int k_py = -p.up_y * p.kernel_w;
scalar_t v = 0.0f;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p);
x_p += x_px;
k_p += k_px;
}
x_p += x_py - w * x_px;
k_p += k_py - w * k_px;
}
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
__shared__ volatile float sk[kernel_h][kernel_w];
__shared__ volatile float sx[tile_in_h][tile_in_w];
int minor_idx = blockIdx.x;
int tile_out_y = minor_idx / p.minor_dim;
minor_idx -= tile_out_y * p.minor_dim;
tile_out_y *= tile_out_h;
int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
int major_idx_base = blockIdx.z * p.loop_major;
if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
major_idx_base >= p.major_dim) {
return;
}
for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
tap_idx += blockDim.x) {
int ky = tap_idx / kernel_w;
int kx = tap_idx - ky * kernel_w;
scalar_t v = 0.0;
if (kx < p.kernel_w & ky < p.kernel_h) {
v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
}
sk[ky][kx] = v;
}
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major & major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, tile_out_x = tile_out_x_base;
loop_x < p.loop_x & tile_out_x < p.out_w;
loop_x++, tile_out_x += tile_out_w) {
int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
int tile_in_x = floor_div(tile_mid_x, up_x);
int tile_in_y = floor_div(tile_mid_y, up_y);
__syncthreads();
for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
in_idx += blockDim.x) {
int rel_in_y = in_idx / tile_in_w;
int rel_in_x = in_idx - rel_in_y * tile_in_w;
int in_x = rel_in_x + tile_in_x;
int in_y = rel_in_y + tile_in_y;
scalar_t v = 0.0;
if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) *
p.minor_dim +
minor_idx];
}
sx[rel_in_y][rel_in_x] = v;
}
__syncthreads();
for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
out_idx += blockDim.x) {
int rel_out_y = out_idx / tile_out_w;
int rel_out_x = out_idx - rel_out_y * tile_out_w;
int out_x = rel_out_x + tile_out_x;
int out_y = rel_out_y + tile_out_y;
int mid_x = tile_mid_x + rel_out_x * down_x;
int mid_y = tile_mid_y + rel_out_y * down_y;
int in_x = floor_div(mid_x, up_x);
int in_y = floor_div(mid_y, up_y);
int rel_in_x = in_x - tile_in_x;
int rel_in_y = in_y - tile_in_y;
int kernel_x = (in_x + 1) * up_x - mid_x - 1;
int kernel_y = (in_y + 1) * up_y - mid_y - 1;
scalar_t v = 0.0;
#pragma unroll
for (int y = 0; y < kernel_h / up_y; y++)
#pragma unroll
for (int x = 0; x < kernel_w / up_x; x++)
v += sx[rel_in_y + y][rel_in_x + x] *
sk[kernel_y + y * up_y][kernel_x + x * up_x];
if (out_x < p.out_w & out_y < p.out_h) {
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
}
}
torch::Tensor upfirdn2d_op(const torch::Tensor &input,
const torch::Tensor &kernel, int up_x, int up_y,
int down_x, int down_y, int pad_x0, int pad_x1,
int pad_y0, int pad_y1) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
UpFirDn2DKernelParams p;
auto x = input.contiguous();
auto k = kernel.contiguous();
p.major_dim = x.size(0);
p.in_h = x.size(1);
p.in_w = x.size(2);
p.minor_dim = x.size(3);
p.kernel_h = k.size(0);
p.kernel_w = k.size(1);
p.up_x = up_x;
p.up_y = up_y;
p.down_x = down_x;
p.down_y = down_y;
p.pad_x0 = pad_x0;
p.pad_x1 = pad_x1;
p.pad_y0 = pad_y0;
p.pad_y1 = pad_y1;
p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) /
p.down_y;
p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) /
p.down_x;
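// Worked example for the output-size formula above: in_h = 6, up_y = 1,
// pad_y0 = pad_y1 = 0, kernel_h = 2, down_y = 2 gives
// out_h = (6*1 + 0 + 0 - 2 + 2) / 2 = 3 (integer division, i.e. every second of the
// 5 valid filter positions).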
auto out =
at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
int mode = -1;
int tile_out_h = -1;
int tile_out_w = -1;
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 1;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 3 && p.kernel_w <= 3) {
mode = 2;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 3;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 4;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 5;
tile_out_h = 8;
tile_out_w = 32;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 6;
tile_out_h = 8;
tile_out_w = 32;
}
dim3 block_size;
dim3 grid_size;
if (tile_out_h > 0 && tile_out_w > 0) {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 1;
block_size = dim3(32 * 8, 1, 1);
grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
(p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
(p.major_dim - 1) / p.loop_major + 1);
} else {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 4;
block_size = dim3(4, 32, 1);
grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1,
(p.out_w - 1) / (p.loop_x * block_size.y) + 1,
(p.major_dim - 1) / p.loop_major + 1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
switch (mode) {
case 1:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 2:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 3:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 4:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 5:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 6:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
default:
hipLaunchKernelGGL(( upfirdn2d_kernel_large<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
}
});
return out;
} | 285909960cb1f8ed8753ec0cbb1bc8d35d9f8bd7.cu | // Copyright (c) SenseTime Research. All rights reserved.
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
int c = a / b;
if (c * b > a) {
c--;
}
return c;
}
struct UpFirDn2DKernelParams {
int up_x;
int up_y;
int down_x;
int down_y;
int pad_x0;
int pad_x1;
int pad_y0;
int pad_y1;
int major_dim;
int in_h;
int in_w;
int minor_dim;
int kernel_h;
int kernel_w;
int out_h;
int out_w;
int loop_major;
int loop_x;
};
template <typename scalar_t>
__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
int minor_idx = blockIdx.x * blockDim.x + threadIdx.x;
int out_y = minor_idx / p.minor_dim;
minor_idx -= out_y * p.minor_dim;
int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y;
int major_idx_base = blockIdx.z * p.loop_major;
if (out_x_base >= p.out_w || out_y >= p.out_h ||
major_idx_base >= p.major_dim) {
return;
}
int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0;
int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h);
int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y;
int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y;
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major && major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, out_x = out_x_base;
loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) {
int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0;
int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w);
int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x;
int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x;
const scalar_t *x_p =
&input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim +
minor_idx];
const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x];
int x_px = p.minor_dim;
int k_px = -p.up_x;
int x_py = p.in_w * p.minor_dim;
int k_py = -p.up_y * p.kernel_w;
scalar_t v = 0.0f;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p);
x_p += x_px;
k_p += k_px;
}
x_p += x_py - w * x_px;
k_p += k_py - w * k_px;
}
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
__shared__ volatile float sk[kernel_h][kernel_w];
__shared__ volatile float sx[tile_in_h][tile_in_w];
int minor_idx = blockIdx.x;
int tile_out_y = minor_idx / p.minor_dim;
minor_idx -= tile_out_y * p.minor_dim;
tile_out_y *= tile_out_h;
int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
int major_idx_base = blockIdx.z * p.loop_major;
if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
major_idx_base >= p.major_dim) {
return;
}
for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
tap_idx += blockDim.x) {
int ky = tap_idx / kernel_w;
int kx = tap_idx - ky * kernel_w;
scalar_t v = 0.0;
if (kx < p.kernel_w & ky < p.kernel_h) {
v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
}
sk[ky][kx] = v;
}
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major & major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, tile_out_x = tile_out_x_base;
loop_x < p.loop_x & tile_out_x < p.out_w;
loop_x++, tile_out_x += tile_out_w) {
int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
int tile_in_x = floor_div(tile_mid_x, up_x);
int tile_in_y = floor_div(tile_mid_y, up_y);
__syncthreads();
for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
in_idx += blockDim.x) {
int rel_in_y = in_idx / tile_in_w;
int rel_in_x = in_idx - rel_in_y * tile_in_w;
int in_x = rel_in_x + tile_in_x;
int in_y = rel_in_y + tile_in_y;
scalar_t v = 0.0;
if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) *
p.minor_dim +
minor_idx];
}
sx[rel_in_y][rel_in_x] = v;
}
__syncthreads();
for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
out_idx += blockDim.x) {
int rel_out_y = out_idx / tile_out_w;
int rel_out_x = out_idx - rel_out_y * tile_out_w;
int out_x = rel_out_x + tile_out_x;
int out_y = rel_out_y + tile_out_y;
int mid_x = tile_mid_x + rel_out_x * down_x;
int mid_y = tile_mid_y + rel_out_y * down_y;
int in_x = floor_div(mid_x, up_x);
int in_y = floor_div(mid_y, up_y);
int rel_in_x = in_x - tile_in_x;
int rel_in_y = in_y - tile_in_y;
int kernel_x = (in_x + 1) * up_x - mid_x - 1;
int kernel_y = (in_y + 1) * up_y - mid_y - 1;
scalar_t v = 0.0;
#pragma unroll
for (int y = 0; y < kernel_h / up_y; y++)
#pragma unroll
for (int x = 0; x < kernel_w / up_x; x++)
v += sx[rel_in_y + y][rel_in_x + x] *
sk[kernel_y + y * up_y][kernel_x + x * up_x];
if (out_x < p.out_w & out_y < p.out_h) {
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
}
}
torch::Tensor upfirdn2d_op(const torch::Tensor &input,
const torch::Tensor &kernel, int up_x, int up_y,
int down_x, int down_y, int pad_x0, int pad_x1,
int pad_y0, int pad_y1) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
UpFirDn2DKernelParams p;
auto x = input.contiguous();
auto k = kernel.contiguous();
p.major_dim = x.size(0);
p.in_h = x.size(1);
p.in_w = x.size(2);
p.minor_dim = x.size(3);
p.kernel_h = k.size(0);
p.kernel_w = k.size(1);
p.up_x = up_x;
p.up_y = up_y;
p.down_x = down_x;
p.down_y = down_y;
p.pad_x0 = pad_x0;
p.pad_x1 = pad_x1;
p.pad_y0 = pad_y0;
p.pad_y1 = pad_y1;
p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) /
p.down_y;
p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) /
p.down_x;
auto out =
at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
int mode = -1;
int tile_out_h = -1;
int tile_out_w = -1;
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 1;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 3 && p.kernel_w <= 3) {
mode = 2;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 3;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 4;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 5;
tile_out_h = 8;
tile_out_w = 32;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 6;
tile_out_h = 8;
tile_out_w = 32;
}
dim3 block_size;
dim3 grid_size;
if (tile_out_h > 0 && tile_out_w > 0) {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 1;
block_size = dim3(32 * 8, 1, 1);
grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
(p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
(p.major_dim - 1) / p.loop_major + 1);
} else {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 4;
block_size = dim3(4, 32, 1);
grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1,
(p.out_w - 1) / (p.loop_x * block_size.y) + 1,
(p.major_dim - 1) / p.loop_major + 1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
switch (mode) {
case 1:
upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 2:
upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 3:
upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 4:
upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 5:
upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 6:
upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
default:
upfirdn2d_kernel_large<scalar_t><<<grid_size, block_size, 0, stream>>>(
out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
}
});
return out;
} |
60df58431dd28a05cad442d9dabc9b300b067bc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h> // for printf
#define N 64 // constant, threads per block
#define TPB 32 // constant, threads per block
// converts int to evenly spaced floats
// ie) .1, .2, ..., .5, ..., .9
float scale(int i, int n)
{
return ((float) i) / (n - 1);
}
// Computes distance between 2 points on a line
__device__
float distance(float x1, float x2)
{
return sqrt((x2 - x1) * (x2 - x1));
}
__global__
void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = d_in[i];
d_out[i] = distance(x, ref);
printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}
// Auto run main method
int main()
{
float ref = 0.5f;
// declare pointers to device arrays
float *in = 0;
float *out = 0;
// allocate device memory to device arrays
hipMallocManaged(&in, N * sizeof(float));
hipMallocManaged(&out, N * sizeof(float));
// fill the input array with evenly spaced values in [0, 1]
for(int i = 0; i < N; i++)
{
in[i] = scale(i, N);
}
// launch kernel to compute and store distance vals
hipLaunchKernelGGL(( distanceKernel), dim3(N/TPB), dim3(TPB), 0, 0, out, in, ref);
hipDeviceSynchronize();
// free memory for device arrays
hipFree(in);
hipFree(out);
return 0;
}
| 60df58431dd28a05cad442d9dabc9b300b067bc3.cu | #include <stdio.h> // for printf
#define N 64 // constant, threads per block
#define TPB 32 // constant, threads per block
// converts int to evenly spaced floats
// ie) .1, .2, ..., .5, ..., .9
float scale(int i, int n)
{
return ((float) i) / (n - 1);
}
// Computes distance between 2 points on a line
__device__
float distance(float x1, float x2)
{
return sqrt((x2 - x1) * (x2 - x1));
}
__global__
void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = d_in[i];
d_out[i] = distance(x, ref);
printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}
// Auto run main method
int main()
{
float ref = 0.5f;
// declare pointers to device arrays
float *in = 0;
float *out = 0;
// allocate device memory to device arrays
cudaMallocManaged(&in, N * sizeof(float));
cudaMallocManaged(&out, N * sizeof(float));
// fill the input array with evenly spaced values in [0, 1]
for(int i = 0; i < N; i++)
{
in[i] = scale(i, N);
}
// launch kernel to compute and store distance vals
distanceKernel<<<N/TPB, TPB>>>(out, in, ref);
cudaDeviceSynchronize();
// free memory for device arrays
cudaFree(in);
cudaFree(out);
return 0;
}
|
220a113eda4f37162a1b63222ad69afcf16a3623.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_runtime.h>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/platform/gpu_info.h"
template <typename T>
using vec = paddle::framework::Vector<T>;
TEST(mixed_vector, CPU_VECTOR) {
vec<int> tmp;
for (int i = 0; i < 10; ++i) {
tmp.push_back(i);
}
ASSERT_EQ(tmp.size(), 10UL);
vec<int> tmp2;
tmp2 = tmp;
ASSERT_EQ(tmp2.size(), 10UL);
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp2[i], i);
ASSERT_EQ(tmp2[i], tmp[i]);
}
int cnt = 0;
for (auto& t : tmp2) {
ASSERT_EQ(t, cnt);
++cnt;
}
}
static __global__ void multiply_10(int* ptr) {
for (int i = 0; i < 10; ++i) {
ptr[i] *= 10;
}
}
hipStream_t GetCUDAStream(paddle::platform::CUDAPlace place) {
return reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
}
TEST(mixed_vector, GPU_VECTOR) {
vec<int> tmp;
for (int i = 0; i < 10; ++i) {
tmp.push_back(i);
}
ASSERT_EQ(tmp.size(), 10UL);
paddle::platform::CUDAPlace gpu(0);
hipLaunchKernelGGL(( multiply_10), dim3(1), dim3(1), 0, GetCUDAStream(gpu), tmp.MutableData(gpu));
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 10);
}
}
TEST(mixed_vector, MultiGPU) {
if (paddle::platform::GetCUDADeviceCount() < 2) {
LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple "
"GPUs in your machine.";
return;
}
vec<int> tmp;
for (int i = 0; i < 10; ++i) {
tmp.push_back(i);
}
ASSERT_EQ(tmp.size(), 10UL);
paddle::platform::CUDAPlace gpu0(0);
paddle::platform::SetDeviceId(0);
hipLaunchKernelGGL(( multiply_10), dim3(1), dim3(1), 0, GetCUDAStream(gpu0), tmp.MutableData(gpu0));
paddle::platform::CUDAPlace gpu1(1);
auto* gpu1_ptr = tmp.MutableData(gpu1);
paddle::platform::SetDeviceId(1);
hipLaunchKernelGGL(( multiply_10), dim3(1), dim3(1), 0, GetCUDAStream(gpu1), gpu1_ptr);
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 100);
}
}
TEST(mixed_vector, InitWithCount) {
paddle::framework::Vector<int> vec(10, 10);
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(vec[i], 10);
}
}
TEST(mixed_vector, ForEach) {
vec<int> tmp;
for (auto& v : tmp) {
}
}
| 220a113eda4f37162a1b63222ad69afcf16a3623.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda_runtime.h>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/platform/gpu_info.h"
template <typename T>
using vec = paddle::framework::Vector<T>;
TEST(mixed_vector, CPU_VECTOR) {
vec<int> tmp;
for (int i = 0; i < 10; ++i) {
tmp.push_back(i);
}
ASSERT_EQ(tmp.size(), 10UL);
vec<int> tmp2;
tmp2 = tmp;
ASSERT_EQ(tmp2.size(), 10UL);
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp2[i], i);
ASSERT_EQ(tmp2[i], tmp[i]);
}
int cnt = 0;
for (auto& t : tmp2) {
ASSERT_EQ(t, cnt);
++cnt;
}
}
static __global__ void multiply_10(int* ptr) {
for (int i = 0; i < 10; ++i) {
ptr[i] *= 10;
}
}
cudaStream_t GetCUDAStream(paddle::platform::CUDAPlace place) {
return reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
}
TEST(mixed_vector, GPU_VECTOR) {
vec<int> tmp;
for (int i = 0; i < 10; ++i) {
tmp.push_back(i);
}
ASSERT_EQ(tmp.size(), 10UL);
paddle::platform::CUDAPlace gpu(0);
multiply_10<<<1, 1, 0, GetCUDAStream(gpu)>>>(tmp.MutableData(gpu));
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 10);
}
}
TEST(mixed_vector, MultiGPU) {
if (paddle::platform::GetCUDADeviceCount() < 2) {
LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple "
"GPUs in your machine.";
return;
}
vec<int> tmp;
for (int i = 0; i < 10; ++i) {
tmp.push_back(i);
}
ASSERT_EQ(tmp.size(), 10UL);
paddle::platform::CUDAPlace gpu0(0);
paddle::platform::SetDeviceId(0);
multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0));
paddle::platform::CUDAPlace gpu1(1);
auto* gpu1_ptr = tmp.MutableData(gpu1);
paddle::platform::SetDeviceId(1);
multiply_10<<<1, 1, 0, GetCUDAStream(gpu1)>>>(gpu1_ptr);
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 100);
}
}
TEST(mixed_vector, InitWithCount) {
paddle::framework::Vector<int> vec(10, 10);
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(vec[i], 10);
}
}
TEST(mixed_vector, ForEach) {
vec<int> tmp;
for (auto& v : tmp) {
}
}
|
3cce687bae94f666465c3f54af2de851e6300276.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense matrix-sparse/dense matrix addition Z=CuMatlab_add(Sparse/Dense(X),Sparse/Dense(Y), alpha, beta).
* Z= alpha*X+beta*Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include "CuMatlab_sparseSSR.cu"
#include "CuMatlab_sparseSSC.cu"
#include "CuMatlab_sparseDDR.cu"
#include "CuMatlab_sparseDDC.cu"
#include "CuMatlab_sparseSDR.cu"
#include "CuMatlab_sparseSDC.cu"
#include "CuMatlab_sparseDSR.cu"
#include "CuMatlab_sparseDSC.cu"
#include <hip/hip_runtime.h>
extern "C" static void mexCuMatlab_sparseSSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==4 && nlhs==1) {
if (mxIsGPUArray(prhs[0]) && mxIsGPUArray(prhs[1])) {
mxGPUArray const *tempGPU1;
tempGPU1 = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const *tempGPU2;
tempGPU2 = mxGPUCreateFromMxArray(prhs[1]);
mxGPUArray const *tempGPU3;
tempGPU3 = mxGPUCreateFromMxArray(prhs[2]);
mxGPUArray const *tempGPU4;
tempGPU4 = mxGPUCreateFromMxArray(prhs[3]);
if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxREAL) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxREAL) ){
if ( mxGPUGetComplexity(tempGPU3) != mxREAL || mxGPUGetComplexity(tempGPU4) != mxREAL) {
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. %s\n");
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxCOMPLEX) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxCOMPLEX) ){
if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX || mxGPUGetComplexity(tempGPU4) != mxCOMPLEX){
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n");
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else{
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0]) && !mxIsGPUArray(prhs[1])) {
mxGPUArray const *tempGPU3;
tempGPU3 = mxGPUCreateFromMxArray(prhs[2]);
mxGPUArray const *tempGPU4;
tempGPU4 = mxGPUCreateFromMxArray(prhs[3]);
if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[1]))){
if ( mxGPUGetComplexity(tempGPU3) != mxREAL || mxGPUGetComplexity(tempGPU4) != mxREAL) {
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. %s\n");
return;
}
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[1]))){
if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX || mxGPUGetComplexity(tempGPU4) != mxCOMPLEX){
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n");
return;
}
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<4) || (nrhs>4) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input arguments must be four and output argument must be one\n");
return;
}
}
| 3cce687bae94f666465c3f54af2de851e6300276.cu |
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense matrix-sparse/dense matrix addition Z=CuMatlab_add(Sparse/Dense(X),Sparse/Dense(Y), alpha, beta).
* Z= alpha*X+beta*Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include "CuMatlab_sparseSSR.cu"
#include "CuMatlab_sparseSSC.cu"
#include "CuMatlab_sparseDDR.cu"
#include "CuMatlab_sparseDDC.cu"
#include "CuMatlab_sparseSDR.cu"
#include "CuMatlab_sparseSDC.cu"
#include "CuMatlab_sparseDSR.cu"
#include "CuMatlab_sparseDSC.cu"
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" static void mexCuMatlab_sparseSSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==4 && nlhs==1) {
if (mxIsGPUArray(prhs[0]) && mxIsGPUArray(prhs[1])) {
mxGPUArray const *tempGPU1;
tempGPU1 = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const *tempGPU2;
tempGPU2 = mxGPUCreateFromMxArray(prhs[1]);
mxGPUArray const *tempGPU3;
tempGPU3 = mxGPUCreateFromMxArray(prhs[2]);
mxGPUArray const *tempGPU4;
tempGPU4 = mxGPUCreateFromMxArray(prhs[3]);
if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxREAL) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxREAL) ){
if ( mxGPUGetComplexity(tempGPU3) != mxREAL || mxGPUGetComplexity(tempGPU4) != mxREAL) {
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. %s\n");
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxCOMPLEX) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxCOMPLEX) ){
if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX || mxGPUGetComplexity(tempGPU4) != mxCOMPLEX){
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n");
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else{
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0]) && !mxIsGPUArray(prhs[1])) {
mxGPUArray const *tempGPU3;
tempGPU3 = mxGPUCreateFromMxArray(prhs[2]);
mxGPUArray const *tempGPU4;
tempGPU4 = mxGPUCreateFromMxArray(prhs[3]);
if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[1]))){
if ( mxGPUGetComplexity(tempGPU3) != mxREAL || mxGPUGetComplexity(tempGPU4) != mxREAL) {
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. %s\n");
return;
}
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[1]))){
if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX || mxGPUGetComplexity(tempGPU4) != mxCOMPLEX){
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n");
return;
}
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU3);
mxGPUDestroyGPUArray(tempGPU4);
return;
}
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<4) || (nrhs>4) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input arguments must be four and output argument must be one\n");
return;
}
}
|
a9335900ec0de2e305d5e26812fcc071fd05edd6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "acc_internal.h"
#include "acc_gpu_internal.h"
//funcs
//hipStream_t _ACC_gpu_get_stream(int id);
//void _ACC_gpu_init_stream_map(int table_size);
//void _ACC_gpu_finalize_stream_map();
//void _ACC_gpu_wait(int id);
//void _ACC_gpu_wait_all();
//int _ACC_gpu_test(int id);
//int _ACC_gpu_test_all();
static int calc_hash(int id);
static void create_stream(hipStream_t *stream);
static void destroy_stream(hipStream_t stream);
typedef struct Cell
{
int id;
hipStream_t stream;
void *mpool;
unsigned *block_count;
struct Cell *next;
}Cell;
typedef Cell** StreamMap;
//static StreamMap stream_map = NULL;
static const int table_size = 16;
static Cell* alloc_cell(int id);
static void free_cell(Cell* cell);
static void add_cell(StreamMap stream_map, int id, Cell *cell);
//static Cell* async_sync_cell;
//static Cell* async_noval_cell;
static Cell* alloc_cell(int id)
{
Cell *new_cell = (Cell *)_ACC_alloc(sizeof(Cell));
if(id != ACC_ASYNC_SYNC){
create_stream(&(new_cell->stream));
}else{
new_cell->stream = 0;
}
new_cell->id = id;
_ACC_gpu_mpool_alloc_block(&new_cell->mpool);
_ACC_gpu_calloc((void**)&new_cell->block_count, sizeof(unsigned));
return new_cell;
}
static void free_cell(Cell* cell)
{
if(cell == NULL) return;
if(cell->id != ACC_ASYNC_SYNC){
destroy_stream(cell->stream);
}
_ACC_gpu_mpool_free_block(cell->mpool);
_ACC_gpu_free(cell->block_count);
_ACC_free(cell);
}
void* _ACC_gpu_init_stream_map(int size)
{
_ACC_DEBUG("init_map\n")
//table_size = size;
StreamMap map;
map = (StreamMap)_ACC_alloc(table_size * sizeof(Cell *));
int i;
for(i=0;i<table_size;i++) map[i] = NULL;
//stream_map = map;
Cell* async_sync_cell = alloc_cell(ACC_ASYNC_SYNC);
Cell* async_noval_cell = alloc_cell(ACC_ASYNC_NOVAL);
add_cell(map, ACC_ASYNC_SYNC, async_sync_cell);
add_cell(map, ACC_ASYNC_NOVAL, async_noval_cell);
return map;
}
void _ACC_gpu_finalize_stream_map(void* map)
{
//printf("finalize map\n");
int i;
if(map == NULL) return;
StreamMap st_map = (StreamMap)map;
for(i=0;i<table_size;i++){
Cell *head = st_map[i], *cur, *next;
for(cur = head; cur != NULL; cur = next){
next = cur->next;
free_cell(cur);
cur = NULL;
}
}
_ACC_free(st_map);
}
// void _ACC_gpu_set_stream_map(void* map)
// {
// //stream_map = (StreamMap)map;
// int hash = calc_hash(ACC_ASYNC_SYNC);
// for(Cell *cur = stream_map[hash]; cur != NULL; cur = cur->next){
// if(cur->id == ACC_ASYNC_SYNC){
// async_sync_cell = cur;
// }
// }
// hash = calc_hash(ACC_ASYNC_NOVAL);
// for(Cell *cur = stream_map[hash]; cur != NULL; cur = cur->next){
// if(cur->id == ACC_ASYNC_NOVAL){
// async_noval_cell = cur;
// }
// }
// }
static void add_cell(StreamMap stream_map, int id, Cell *cell)
{
int hash = calc_hash(id);
// StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
cell->next = stream_map[hash];
stream_map[hash] = cell;
}
static Cell* get_cell(int id)
{
//printf("get_cell(%d)\n", id);
// if(id == ACC_ASYNC_SYNC || id == ACC_ASYNC_NOVAL){
// return async_sync_cell;
// }
if(id == ACC_ASYNC_NOVAL) return get_cell(ACC_ASYNC_SYNC);
int hash = calc_hash(id);
StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
for(Cell *cur = stream_map[hash]; cur != NULL; cur = cur->next){
if(cur->id == id){
return cur;
}
}
Cell *new_cell = alloc_cell(id);
add_cell(stream_map, id, new_cell);
return new_cell;
}
hipStream_t _ACC_gpu_get_stream(int id)
{
Cell *cell = get_cell(id);
return cell->stream;
}
//wait func
void _ACC_gpu_wait(int id){
hipStream_t stream = _ACC_gpu_get_stream(id);
hipError_t error = hipStreamSynchronize(stream);
if(error != hipSuccess){
_ACC_gpu_fatal(error);
}
}
void _ACC_gpu_wait_all(){
int i;
StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
for(i=0;i<table_size;i++){
Cell *head = stream_map[i], *cur;
for(cur = head; cur != NULL; cur = cur->next){
//do something
if(cur->id == ACC_ASYNC_NOVAL) continue;
hipError_t error = hipStreamSynchronize(cur->stream);
if(error != hipSuccess){
_ACC_gpu_fatal(error);
}
}
}
}
/*
void _ACC_gpu_wait_async(int id1, int id2){
//id2 waits for the completion of id1
if(id1 == id2){
_ACC_gpu_wait(id1);
return;
}
hipStream_t stream1 = _ACC_gpu_getstream(id1);
hipStream_t stream2 = _ACC_gpu_getstream(id2);
hipEvent_t waitEvent;
hipEventCreate(&waitEvent);
}
*/
//test func
int _ACC_gpu_test(int id)
{
hipStream_t stream = _ACC_gpu_get_stream(id);
hipError_t error = hipStreamQuery(stream);
if(error == hipSuccess){
return ~0;
}else{
return 0;
}
}
int _ACC_gpu_test_all()
{
int i;
StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
for(i=0;i<table_size;i++){
Cell *head = stream_map[i], *cur;
for(cur = head; cur != NULL; cur = cur->next){
//do something
hipError_t error = hipStreamQuery(cur->stream);
if(error != hipSuccess){
return 0;
}
}
}
return ~0;
}
//internal functions
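// Maps an async id onto a bucket index in [0, table_size); the adjustment
// keeps the result non-negative when id is negative (as the special
// ACC_ASYNC_* handles may be).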
static int calc_hash(int id)
{
int r = id%table_size;
if(r < 0){
r += table_size;
}
return r;
}
static void create_stream(hipStream_t *stream)
{
hipError_t error = hipStreamCreate(stream);
//error handling
if(error != hipSuccess){
_ACC_fatal("cant create stream\n");
}
}
static void destroy_stream(hipStream_t stream)
{
hipError_t error = hipStreamDestroy(stream);
if(error != hipSuccess){
_ACC_fatal("can't destroy stream\n");
}
}
/*
//for test
static void print()
{
if(stream_map == NULL){
printf("no map\n");
return;
}
int i;
for(i=0;i<table_size;i++){
printf("StreamMap[%d]:", i);
Cell *head = stream_map[i];
Cell *cur;
for(cur = head; cur!=NULL;cur = cur->next){
printf("(%d, %d)->",cur->id, cur->stream);
}
printf("null\n");
}
}
*/
/*
int main(void) //for test
{
_ACC_gpu_init_stream_map(4);
print();
hipStream_t a,b,c;
a = _ACC_gpu_get_stream(3);
printf("id=3 stream=%lld\n", (long long)a);
_ACC_gpu_get_stream(5);
_ACC_gpu_get_stream(1);
_ACC_gpu_get_stream(3);
printf("id=3 stream=%lld\n", (long long)a);
_ACC_gpu_get_stream(7);
print();
_ACC_gpu_finalize_stream_map();
print();
}
*/
void _ACC_gpu_mpool_get(void **ptr)
{
//*ptr = async_sync_cell->mpool;
_ACC_gpu_mpool_get_async(ptr, ACC_ASYNC_SYNC);
}
void _ACC_gpu_mpool_get_async(void **ptr, int id)
{
Cell *cell = get_cell(id);
*ptr = cell->mpool;
}
void _ACC_gpu_get_block_count(unsigned **count)
{
//*count = async_sync_cell->block_count;
_ACC_gpu_get_block_count_async(count, ACC_ASYNC_SYNC);
}
void _ACC_gpu_get_block_count_async(unsigned **count, int id)
{
Cell *cell = get_cell(id);
*count = cell->block_count;
}
| a9335900ec0de2e305d5e26812fcc071fd05edd6.cu | #include <stdio.h>
#include "acc_internal.h"
#include "acc_gpu_internal.h"
//funcs
//cudaStream_t _ACC_gpu_get_stream(int id);
//void _ACC_gpu_init_stream_map(int table_size);
//void _ACC_gpu_finalize_stream_map();
//void _ACC_gpu_wait(int id);
//void _ACC_gpu_wait_all();
//int _ACC_gpu_test(int id);
//int _ACC_gpu_test_all();
static int calc_hash(int id);
static void create_stream(cudaStream_t *stream);
static void destroy_stream(cudaStream_t stream);
typedef struct Cell
{
int id;
cudaStream_t stream;
void *mpool;
unsigned *block_count;
struct Cell *next;
}Cell;
typedef Cell** StreamMap;
//static StreamMap stream_map = NULL;
static const int table_size = 16;
static Cell* alloc_cell(int id);
static void free_cell(Cell* cell);
static void add_cell(StreamMap stream_map, int id, Cell *cell);
//static Cell* async_sync_cell;
//static Cell* async_noval_cell;
static Cell* alloc_cell(int id)
{
Cell *new_cell = (Cell *)_ACC_alloc(sizeof(Cell));
if(id != ACC_ASYNC_SYNC){
create_stream(&(new_cell->stream));
}else{
new_cell->stream = 0;
}
new_cell->id = id;
_ACC_gpu_mpool_alloc_block(&new_cell->mpool);
_ACC_gpu_calloc((void**)&new_cell->block_count, sizeof(unsigned));
return new_cell;
}
static void free_cell(Cell* cell)
{
if(cell == NULL) return;
if(cell->id != ACC_ASYNC_SYNC){
destroy_stream(cell->stream);
}
_ACC_gpu_mpool_free_block(cell->mpool);
_ACC_gpu_free(cell->block_count);
_ACC_free(cell);
}
void* _ACC_gpu_init_stream_map(int size)
{
_ACC_DEBUG("init_map\n")
//table_size = size;
StreamMap map;
map = (StreamMap)_ACC_alloc(table_size * sizeof(Cell *));
int i;
for(i=0;i<table_size;i++) map[i] = NULL;
//stream_map = map;
Cell* async_sync_cell = alloc_cell(ACC_ASYNC_SYNC);
Cell* async_noval_cell = alloc_cell(ACC_ASYNC_NOVAL);
add_cell(map, ACC_ASYNC_SYNC, async_sync_cell);
add_cell(map, ACC_ASYNC_NOVAL, async_noval_cell);
return map;
}
void _ACC_gpu_finalize_stream_map(void* map)
{
//printf("finalize map\n");
int i;
if(map == NULL) return;
StreamMap st_map = (StreamMap)map;
for(i=0;i<table_size;i++){
Cell *head = st_map[i], *cur, *next;
for(cur = head; cur != NULL; cur = next){
next = cur->next;
free_cell(cur);
cur = NULL;
}
}
_ACC_free(st_map);
}
// void _ACC_gpu_set_stream_map(void* map)
// {
// //stream_map = (StreamMap)map;
// int hash = calc_hash(ACC_ASYNC_SYNC);
// for(Cell *cur = stream_map[hash]; cur != NULL; cur = cur->next){
// if(cur->id == ACC_ASYNC_SYNC){
// async_sync_cell = cur;
// }
// }
// hash = calc_hash(ACC_ASYNC_NOVAL);
// for(Cell *cur = stream_map[hash]; cur != NULL; cur = cur->next){
// if(cur->id == ACC_ASYNC_NOVAL){
// async_noval_cell = cur;
// }
// }
// }
static void add_cell(StreamMap stream_map, int id, Cell *cell)
{
int hash = calc_hash(id);
// StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
cell->next = stream_map[hash];
stream_map[hash] = cell;
}
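// Returns the cell bound to the given async id: ACC_ASYNC_NOVAL is redirected
// to the ACC_ASYNC_SYNC cell, and an id seen for the first time lazily gets a
// new cell (its own stream, memory-pool block and block counter).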
static Cell* get_cell(int id)
{
//printf("get_cell(%d)\n", id);
// if(id == ACC_ASYNC_SYNC || id == ACC_ASYNC_NOVAL){
// return async_sync_cell;
// }
if(id == ACC_ASYNC_NOVAL) return get_cell(ACC_ASYNC_SYNC);
int hash = calc_hash(id);
StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
for(Cell *cur = stream_map[hash]; cur != NULL; cur = cur->next){
if(cur->id == id){
return cur;
}
}
Cell *new_cell = alloc_cell(id);
add_cell(stream_map, id, new_cell);
return new_cell;
}
cudaStream_t _ACC_gpu_get_stream(int id)
{
Cell *cell = get_cell(id);
return cell->stream;
}
//wait func
void _ACC_gpu_wait(int id){
cudaStream_t stream = _ACC_gpu_get_stream(id);
cudaError_t error = cudaStreamSynchronize(stream);
if(error != cudaSuccess){
_ACC_gpu_fatal(error);
}
}
void _ACC_gpu_wait_all(){
int i;
StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
for(i=0;i<table_size;i++){
Cell *head = stream_map[i], *cur;
for(cur = head; cur != NULL; cur = cur->next){
//do something
if(cur->id == ACC_ASYNC_NOVAL) continue;
cudaError_t error = cudaStreamSynchronize(cur->stream);
if(error != cudaSuccess){
_ACC_gpu_fatal(error);
}
}
}
}
/*
void _ACC_gpu_wait_async(int id1, int id2){
//id2 waits for the completion of id1
if(id1 == id2){
_ACC_gpu_wait(id1);
return;
}
cudaStream_t stream1 = _ACC_gpu_getstream(id1);
cudaStream_t stream2 = _ACC_gpu_getstream(id2);
cudaEvent_t waitEvent;
cudaEventCreate(&waitEvent);
}
*/
//test func
int _ACC_gpu_test(int id)
{
cudaStream_t stream = _ACC_gpu_get_stream(id);
cudaError_t error = cudaStreamQuery(stream);
if(error == cudaSuccess){
return ~0;
}else{
return 0;
}
}
int _ACC_gpu_test_all()
{
int i;
StreamMap stream_map = (StreamMap)_ACC_gpu_get_current_stream_map();
for(i=0;i<table_size;i++){
Cell *head = stream_map[i], *cur;
for(cur = head; cur != NULL; cur = cur->next){
//do something
cudaError_t error = cudaStreamQuery(cur->stream);
if(error != cudaSuccess){
return 0;
}
}
}
return ~0;
}
//internal functions
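// Maps an async id onto a bucket index in [0, table_size); the adjustment
// keeps the result non-negative when id is negative (as the special
// ACC_ASYNC_* handles may be).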
static int calc_hash(int id)
{
int r = id%table_size;
if(r < 0){
r += table_size;
}
return r;
}
static void create_stream(cudaStream_t *stream)
{
cudaError_t error = cudaStreamCreate(stream);
//error handling
if(error != cudaSuccess){
_ACC_fatal("cant create stream\n");
}
}
static void destroy_stream(cudaStream_t stream)
{
cudaError_t error = cudaStreamDestroy(stream);
if(error != cudaSuccess){
_ACC_fatal("can't destroy stream\n");
}
}
/*
//for test
static void print()
{
if(stream_map == NULL){
printf("no map\n");
return;
}
int i;
for(i=0;i<table_size;i++){
printf("StreamMap[%d]:", i);
Cell *head = stream_map[i];
Cell *cur;
for(cur = head; cur!=NULL;cur = cur->next){
printf("(%d, %d)->",cur->id, cur->stream);
}
printf("null\n");
}
}
*/
/*
int main(void) //for test
{
_ACC_gpu_init_stream_map(4);
print();
cudaStream_t a,b,c;
a = _ACC_gpu_get_stream(3);
printf("id=3 stream=%lld\n", (long long)a);
_ACC_gpu_get_stream(5);
_ACC_gpu_get_stream(1);
_ACC_gpu_get_stream(3);
printf("id=3 stream=%lld\n", (long long)a);
_ACC_gpu_get_stream(7);
print();
_ACC_gpu_finalize_stream_map();
print();
}
*/
void _ACC_gpu_mpool_get(void **ptr)
{
//*ptr = async_sync_cell->mpool;
_ACC_gpu_mpool_get_async(ptr, ACC_ASYNC_SYNC);
}
void _ACC_gpu_mpool_get_async(void **ptr, int id)
{
Cell *cell = get_cell(id);
*ptr = cell->mpool;
}
void _ACC_gpu_get_block_count(unsigned **count)
{
//*count = async_sync_cell->block_count;
_ACC_gpu_get_block_count_async(count, ACC_ASYNC_SYNC);
}
void _ACC_gpu_get_block_count_async(unsigned **count, int id)
{
Cell *cell = get_cell(id);
*count = cell->block_count;
}
|
794f5affa2c9a65bd6bb51db86852ec5c7069484.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define ARRAY_SIZE 1000
__shared__ unsigned int d_bin_data_shared[256];
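// Each thread loads one 32-bit word, treats it as four independent bytes and
// counts each byte into a 256-bin shared-memory histogram using atomics.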
__global__ void histogram(
const unsigned int * d_hist_data,
unsigned int * const d_bin_data
) {
/* thread id */
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
const unsigned int thread_id = (gridDim.x * blockDim.x) * idy + idx;
/* clear shared memory */
d_bin_data_shared[threadIdx.x] = 0;
// fetch data at 32 bits
const unsigned int value_u32 = d_hist_data[thread_id];
// wait until every thread in the block has zeroed its shared-memory bin
__syncthreads();
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0x000000FF)) ]), 1);
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0x0000FF00)) >> 8]), 1);
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0x00FF0000)) >> 16]), 1);
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0xFF000000)) >> 24]), 1);
}
void generate_data(unsigned int* hist){
for(int i = 0; i < ARRAY_SIZE; i++){
hist[i] = random() % 256;
}
}
int main() {
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
unsigned int hist_data[ARRAY_SIZE];
generate_data(hist_data);
return 0;
}
| 794f5affa2c9a65bd6bb51db86852ec5c7069484.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define ARRAY_SIZE 1000
__shared__ unsigned int d_bin_data_shared[256];
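// Each thread loads one 32-bit word, treats it as four independent bytes and
// counts each byte into a 256-bin shared-memory histogram using atomics.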
__global__ void histogram(
const unsigned int * d_hist_data,
unsigned int * const d_bin_data
) {
/* thread id */
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
const unsigned int thread_id = (gridDim.x * blockDim.x) * idy + idx;
/* clear shared memory */
d_bin_data_shared[threadIdx.x] = 0;
// fetch data at 32 bits
const unsigned int value_u32 = d_hist_data[thread_id];
// wait until every thread in the block has zeroed its shared-memory bin
__syncthreads();
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0x000000FF)) ]), 1);
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0x0000FF00)) >> 8]), 1);
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0x00FF0000)) >> 16]), 1);
atomicAdd(&(d_bin_data_shared[ ((value_u32 & 0xFF000000)) >> 24]), 1);
}
void generate_data(unsigned int* hist){
for(int i = 0; i < ARRAY_SIZE; i++){
hist[i] = random() % 256;
}
}
int main() {
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
unsigned int hist_data[ARRAY_SIZE];
generate_data(hist_data);
return 0;
}
|
a75e1f9a83c9451d3732cf0242fee30752c44d09.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
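  GEMM + Softmax example: computes D = alpha * A * B + beta * C and a
  row-wise softmax of D on the device, optionally verifies the results
  against a host reference, and profiles the kernels over several iterations.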
*/
#include <cmath>
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/device/gemm_complex.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "gemm_with_softmax.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#define TRACE(x) { std::cout << "gemm_softmax.cu:" << __LINE__ << " " << x << std::endl; }
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class Disposition {
kPassed,
kIncorrect,
kNotVerified
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
int iterations;
unsigned seed;
float alpha;
float beta;
bool verification_enabled;
double tolerance;
Options():
help(false),
problem_size({16, 24, 64}),
batch_count(1), // As a temporary limitation to the test bench, batch count must be 1. The kernels support arbitrary batching.
iterations(20),
seed(2022),
alpha(1),
beta(),
verification_enabled(true),
tolerance(0.01)
{ }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("verify", verification_enabled);
cmd.get_cmd_line_argument("seed", seed);
cmd.get_cmd_line_argument("tolerance", tolerance);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "35_gemm_softmax example\n\n"
<< " This example uses the CUTLASS Library to compute GEMM + Softmax for arbitrary problem sizes.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --seed=<int> Random number seed (1*)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform (0 to disable profiling).\n\n"
<< " --verify=<bool> If true, performs reference calculation.\n\n"
<< " --tolerance <float> Error tolerance\n"
;
out << "\n\nExamples:\n\n"
<< "$ ./examples/35_gemm_softmax/35_gemm_softmax --m=1024 --n=512 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Returns true if the environment and Toolkit support this
bool supported(bool verbose = true) const {
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
if (verbose) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
}
return false;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
if (verbose) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
}
return false;
}
if (!((props.major * 10 + props.minor) >= 80)) {
if (verbose) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
}
return false;
}
return true;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Testbed {
//
// Type definitions
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementD = cutlass::half_t;
using ElementCompute = float;
using ElementSoftmax = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using GemmSoftmax = cutlass::GemmSoftmax<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC,
ElementCompute
>;
using ElementN = typename GemmSoftmax::ElementN;
using LayoutC = typename GemmSoftmax::LayoutC;
//
// Data members
//
Options const &options;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementD, LayoutC> tensor_D;
cutlass::HostTensor<ElementN, LayoutC> tensor_N;
cutlass::HostTensor<ElementSoftmax, LayoutC> tensor_Softmax;
cutlass::HostTensor<ElementD, LayoutC> reference_D;
cutlass::HostTensor<ElementN, LayoutC> reference_N;
cutlass::HostTensor<ElementSoftmax, LayoutC> reference_Softmax;
//
// Methods
//
Testbed(
Options const &options_
):
options(options_)
{
tensor_A.reset({options.problem_size.m(), options.problem_size.k()});
tensor_B.reset({options.problem_size.k(), options.problem_size.n()});
tensor_C.reset({options.problem_size.m(), options.problem_size.n()});
tensor_D.reset({options.problem_size.m(), options.problem_size.n()});
tensor_N.reset({options.problem_size.m(), 1});
tensor_Softmax.reset({options.problem_size.m(), options.problem_size.n()});
reference_D.reset({options.problem_size.m(), options.problem_size.n()}, false);
reference_N.reset({options.problem_size.m(), 1}, false);
reference_Softmax.reset({options.problem_size.m(), options.problem_size.n()}, false);
}
/// Run
Disposition run() {
Disposition disposition = Disposition::kNotVerified;
//
// Initialize the workspace
//
initialize();
//
// Launch device kernel
//
cutlass::Status status = cutlass::Status::kSuccess;
status = execute_device_kernel();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Device execution failed." << std::endl;
return disposition;
}
hipError_t result = hipDeviceSynchronize();
if (result != hipSuccess) {
std::cerr << "Device synchronize failed with error "
<< hipGetErrorString(result) << std::endl;
return disposition;
}
//
// Compute the reference
//
compute_reference();
//
// Verify
//
if (options.verification_enabled) {
bool passed = verify();
if (passed) {
disposition = Disposition::kPassed;
}
else {
disposition = Disposition::kIncorrect;
}
}
//
// Profiling
//
if (options.iterations) {
profile();
}
return disposition;
}
/// Random initialization
void initialize() {
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(),
options.seed,
ElementD(5),
ElementD(-5),
0
);
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(),
options.seed + 19,
ElementD(5),
ElementD(-5),
0
);
cutlass::reference::host::TensorFill(
reference_D.host_view(),
ElementD()
);
cutlass::reference::host::TensorFill(
reference_N.host_view(),
ElementN()
);
cutlass::reference::host::TensorFill(
reference_Softmax.host_view(),
ElementSoftmax()
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_D.sync_device();
tensor_N.sync_device();
tensor_Softmax.sync_device();
}
cutlass::Status execute_device_kernel() {
cutlass::Status status = cutlass::Status::kSuccess;
//
// Setup arguments
//
GemmSoftmax::Arguments args(
options.problem_size,
options.batch_count,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
{
ElementCompute(options.alpha),
ElementCompute(options.beta)
},
tensor_N.device_ref(),
tensor_Softmax.device_ref()
);
//
// Launch
//
GemmSoftmax gemm_softmax;
// Initialize
status = gemm_softmax.initialize(args);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run
status = gemm_softmax();
return status;
}
/// Reference calculation
void compute_reference() {
// Compute GEMM
cutlass::reference::host::GemmComplex(
options.problem_size,
options.alpha,
tensor_A.host_ref(),
cutlass::ComplexTransform::kNone,
tensor_B.host_ref(),
cutlass::ComplexTransform::kNone,
options.beta,
tensor_C.host_ref(),
reference_D.host_ref(),
double()
);
// Compute the row-wise maximum (the softmax normalizer, stored in tensor N)
for (int m = 0; m < options.problem_size.m(); ++m) {
reference_N.at({m, 0}) = reference_D.at({m, 0});
for (int n = 1; n < options.problem_size.n(); ++n) {
reference_N.at({m, 0}) = ::max(reference_N.at({m, 0}), ElementN(reference_D.at({m, n})));
}
}
// Compute softmax
for (int m = 0; m < options.problem_size.m(); ++m) {
float sum = float();
for (int n = 0; n < options.problem_size.n(); ++n) {
sum += ::exp( float(reference_D.at({m, n})) - float(reference_N.at({m, 0})) );
}
float inv_sum = float(1.0f / sum);
for (int n = 0; n < options.problem_size.n(); ++n) {
reference_Softmax.at({m, n}) = ElementSoftmax(
::exp( float(reference_D.at({m, n})) - float(reference_N.at({m, 0})) ) * inv_sum
);
}
}
}
/// Emits all tensor values
void emit_results() {
std::cout << "D = \n" << tensor_D.host_view() << "\n\n";
std::cout << "N = \n" << tensor_N.host_view() << "\n\n";
std::cout << "Softmax = \n" << tensor_Softmax.host_view() << "\n\n";
std::cout << "Reference N = \n" << reference_N.host_view() << "\n\n";
std::cout << "Reference D = \n" << reference_D.host_view() << "\n\n";
std::cout << "Reference Softmax = \n" << reference_Softmax.host_view() << "\n\n";
}
/// Verifies the reference matches
bool verify() {
tensor_D.sync_host();
tensor_N.sync_host();
tensor_Softmax.sync_host();
double const kThreshold = options.tolerance;
// Verification checks - set any of these to 'true' to override the verification checks.
bool verified_D = false;
bool verified_N = false;
bool verified_Softmax = false;
// Verify softmax output
if (!verified_D) {
double norm_diff = cutlass::reference::host::TensorNormDiff(
tensor_D.host_view(),
reference_D.host_view());
double norm_reference = cutlass::reference::host::TensorNorm(
reference_D.host_view());
double rel_error = norm_diff / norm_reference;
if (rel_error > kThreshold) {
std::cerr << "\n\nTensor D Relative error: " << rel_error << std::endl;
}
else {
verified_D = true;
}
}
if (!verified_N) {
double norm_diff = cutlass::reference::host::TensorNormDiff(
tensor_N.host_view(),
reference_N.host_view());
double norm_reference = cutlass::reference::host::TensorNorm(
reference_N.host_view());
double rel_error = norm_diff / norm_reference;
if (rel_error > kThreshold) {
std::cerr << "\n\nTensor N Relative error: " << rel_error << std::endl;
}
else {
verified_N = true;
}
}
if (!verified_Softmax) {
double norm_diff = cutlass::reference::host::TensorNormDiff(
tensor_Softmax.host_view(),
reference_Softmax.host_view());
double norm_reference = cutlass::reference::host::TensorNorm(
reference_Softmax.host_view());
double rel_error = norm_diff / norm_reference;
if (rel_error > kThreshold) {
std::cerr << "\n\nSoftmax Relative error: " << rel_error << std::endl;
}
else {
verified_Softmax = true;
}
}
if (!verified_D || !verified_N || !verified_Softmax) {
std::cerr << "Verification check failed for tensor Softmax" << std::endl;
emit_results();
// Summarize which checks failed
if (!verified_D) {
std::cerr << "Verification of D tensor failed\n";
}
if (!verified_N) {
std::cerr << "Verification of N tensor failed\n";
}
if (!verified_Softmax) {
std::cerr << "Verification of Softmax tensor failed\n";
}
return false;
}
return true;
}
/// Profiles
bool profile() {
//
// Profile
//
cutlass::Status status = cutlass::Status::kSuccess;
hipError_t result;
hipEvent_t events[2];
int const kIterations = options.iterations;
for (hipEvent_t &evt : events) {
result = hipEventCreate(&evt);
if (result != hipSuccess) {
std::cerr << "hipEventCreate failed with error " << hipGetErrorString(result) << std::endl;
return false;
}
}
result = hipEventRecord(events[0]);
if (result != hipSuccess) {
std::cerr << "hipEventRecord() failed with error " << hipGetErrorString(result) << std::endl;
return false;
}
for (int iter = 0; iter < kIterations; ++iter) {
status = execute_device_kernel();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Device execution failed." << std::endl;
return false;
}
}
result = hipEventRecord(events[1]);
if (result != hipSuccess) {
std::cerr << "hipEventRecord() failed with error " << hipGetErrorString(result) << std::endl;
return false;
}
result = hipDeviceSynchronize();
if (result != hipSuccess) {
std::cerr << "hipDeviceSynchronize() failed with error " << hipGetErrorString(result) << std::endl;
return false;
}
float elapsed_ms = 0;
result = hipEventElapsedTime(&elapsed_ms, events[0], events[1]);
if (result != hipSuccess) {
std::cerr << "hipEventElapsedTime() failed with error " << hipGetErrorString(result) << std::endl;
return false;
}
for (hipEvent_t &evt : events) {
result = hipEventDestroy(evt);
if (result != hipSuccess) {
std::cerr << "hipEventDestroy() failed with error " << hipGetErrorString(result) << std::endl;
return false;
}
}
int64_t flops = int64_t(options.problem_size.m()) * options.problem_size.n() * options.problem_size.k() * 2;
int64_t bytes = (sizeof(ElementD) * 2 + sizeof(ElementSoftmax)) * options.problem_size.m() * options.problem_size.n();
double gflops_per_second = double(flops) * kIterations / double(elapsed_ms / 1000.0f) / double(1.0e9);
double gbytes_per_second = double(bytes) * kIterations / double(elapsed_ms / 1000.0f) / double(1 << 30);
std::cout << " Problem: "
<< options.problem_size.m() << "-by-" << options.problem_size.n() << "-by-" << options.problem_size.k()
<< std::endl;
std::cout << " Runtime: " << elapsed_ms << " ms\n" << std::endl;
std::cout << " GFLOPs: " << gflops_per_second << " GFLOPs" << std::endl;
std::cout << "Memory bandwidth: " << gbytes_per_second << " GiB/s" << std::endl;
return true;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv) {
// Options parsing
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (!options.supported()) {
return 0;
}
// Run
Testbed testbed(options);
Disposition disposition = testbed.run();
std::cout << std::endl;
switch (disposition) {
case Disposition::kPassed:
std::cout << "Passed" << std::endl;
break;
case Disposition::kIncorrect:
std::cout << "Incorrect" << std::endl;
break;
case Disposition::kNotVerified:
std::cout << "Not verified" << std::endl;
break;
}
return (disposition == Disposition::kPassed ? 0 : -1);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| a75e1f9a83c9451d3732cf0242fee30752c44d09.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
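  GEMM + Softmax example: computes D = alpha * A * B + beta * C and a
  row-wise softmax of D on the device, optionally verifies the results
  against a host reference, and profiles the kernels over several iterations.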
*/
#include <cmath>
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/device/gemm_complex.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "gemm_with_softmax.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#define TRACE(x) { std::cout << "gemm_softmax.cu:" << __LINE__ << " " << x << std::endl; }
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class Disposition {
kPassed,
kIncorrect,
kNotVerified
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
int iterations;
unsigned seed;
float alpha;
float beta;
bool verification_enabled;
double tolerance;
Options():
help(false),
problem_size({16, 24, 64}),
batch_count(1), // As a temporary limitation to the test bench, batch count must be 1. The kernels support arbitrary batching.
iterations(20),
seed(2022),
alpha(1),
beta(),
verification_enabled(true),
tolerance(0.01)
{ }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("verify", verification_enabled);
cmd.get_cmd_line_argument("seed", seed);
cmd.get_cmd_line_argument("tolerance", tolerance);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "35_gemm_softmax example\n\n"
<< " This example uses the CUTLASS Library to compute GEMM + Softmax for arbitrary problem sizes.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --seed=<int> Random number seed (1*)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform (0 to disable profiling).\n\n"
<< " --verify=<bool> If true, performs reference calculation.\n\n"
<< " --tolerance <float> Error tolerance\n"
;
out << "\n\nExamples:\n\n"
<< "$ ./examples/35_gemm_softmax/35_gemm_softmax --m=1024 --n=512 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Returns true if the environment and Toolkit support this
bool supported(bool verbose = true) const {
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
if (verbose) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
}
return false;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
if (verbose) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
}
return false;
}
if (!((props.major * 10 + props.minor) >= 80)) {
if (verbose) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
}
return false;
}
return true;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Testbed {
//
// Type definitions
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementD = cutlass::half_t;
using ElementCompute = float;
using ElementSoftmax = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using GemmSoftmax = cutlass::GemmSoftmax<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC,
ElementCompute
>;
using ElementN = typename GemmSoftmax::ElementN;
using LayoutC = typename GemmSoftmax::LayoutC;
//
// Data members
//
Options const &options;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementD, LayoutC> tensor_D;
cutlass::HostTensor<ElementN, LayoutC> tensor_N;
cutlass::HostTensor<ElementSoftmax, LayoutC> tensor_Softmax;
cutlass::HostTensor<ElementD, LayoutC> reference_D;
cutlass::HostTensor<ElementN, LayoutC> reference_N;
cutlass::HostTensor<ElementSoftmax, LayoutC> reference_Softmax;
//
// Methods
//
Testbed(
Options const &options_
):
options(options_)
{
tensor_A.reset({options.problem_size.m(), options.problem_size.k()});
tensor_B.reset({options.problem_size.k(), options.problem_size.n()});
tensor_C.reset({options.problem_size.m(), options.problem_size.n()});
tensor_D.reset({options.problem_size.m(), options.problem_size.n()});
tensor_N.reset({options.problem_size.m(), 1});
tensor_Softmax.reset({options.problem_size.m(), options.problem_size.n()});
reference_D.reset({options.problem_size.m(), options.problem_size.n()}, false);
reference_N.reset({options.problem_size.m(), 1}, false);
reference_Softmax.reset({options.problem_size.m(), options.problem_size.n()}, false);
}
/// Run
Disposition run() {
Disposition disposition = Disposition::kNotVerified;
//
// Initialize the workspace
//
initialize();
//
// Launch device kernel
//
cutlass::Status status = cutlass::Status::kSuccess;
status = execute_device_kernel();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Device execution failed." << std::endl;
return disposition;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Device synchronize failed with error "
<< cudaGetErrorString(result) << std::endl;
return disposition;
}
//
// Compute the reference
//
compute_reference();
//
// Verify
//
if (options.verification_enabled) {
bool passed = verify();
if (passed) {
disposition = Disposition::kPassed;
}
else {
disposition = Disposition::kIncorrect;
}
}
//
// Profiling
//
if (options.iterations) {
profile();
}
return disposition;
}
/// Random initialization
void initialize() {
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(),
options.seed,
ElementD(5),
ElementD(-5),
0
);
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(),
options.seed + 19,
ElementD(5),
ElementD(-5),
0
);
cutlass::reference::host::TensorFill(
reference_D.host_view(),
ElementD()
);
cutlass::reference::host::TensorFill(
reference_N.host_view(),
ElementN()
);
cutlass::reference::host::TensorFill(
reference_Softmax.host_view(),
ElementSoftmax()
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_D.sync_device();
tensor_N.sync_device();
tensor_Softmax.sync_device();
}
cutlass::Status execute_device_kernel() {
cutlass::Status status = cutlass::Status::kSuccess;
//
// Setup arguments
//
GemmSoftmax::Arguments args(
options.problem_size,
options.batch_count,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
{
ElementCompute(options.alpha),
ElementCompute(options.beta)
},
tensor_N.device_ref(),
tensor_Softmax.device_ref()
);
//
// Launch
//
GemmSoftmax gemm_softmax;
// Initialize
status = gemm_softmax.initialize(args);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run
status = gemm_softmax();
return status;
}
/// Reference calculation
void compute_reference() {
// Compute GEMM
cutlass::reference::host::GemmComplex(
options.problem_size,
options.alpha,
tensor_A.host_ref(),
cutlass::ComplexTransform::kNone,
tensor_B.host_ref(),
cutlass::ComplexTransform::kNone,
options.beta,
tensor_C.host_ref(),
reference_D.host_ref(),
double()
);
// Compute the row-wise maximum (the softmax normalizer, stored in tensor N)
for (int m = 0; m < options.problem_size.m(); ++m) {
reference_N.at({m, 0}) = reference_D.at({m, 0});
for (int n = 1; n < options.problem_size.n(); ++n) {
reference_N.at({m, 0}) = std::max(reference_N.at({m, 0}), ElementN(reference_D.at({m, n})));
}
}
// Compute softmax
for (int m = 0; m < options.problem_size.m(); ++m) {
float sum = float();
for (int n = 0; n < options.problem_size.n(); ++n) {
sum += std::exp( float(reference_D.at({m, n})) - float(reference_N.at({m, 0})) );
}
float inv_sum = float(1.0f / sum);
for (int n = 0; n < options.problem_size.n(); ++n) {
reference_Softmax.at({m, n}) = ElementSoftmax(
std::exp( float(reference_D.at({m, n})) - float(reference_N.at({m, 0})) ) * inv_sum
);
}
}
}
/// Emits all tensor values
void emit_results() {
std::cout << "D = \n" << tensor_D.host_view() << "\n\n";
std::cout << "N = \n" << tensor_N.host_view() << "\n\n";
std::cout << "Softmax = \n" << tensor_Softmax.host_view() << "\n\n";
std::cout << "Reference N = \n" << reference_N.host_view() << "\n\n";
std::cout << "Reference D = \n" << reference_D.host_view() << "\n\n";
std::cout << "Reference Softmax = \n" << reference_Softmax.host_view() << "\n\n";
}
/// Verifies the reference matches
bool verify() {
tensor_D.sync_host();
tensor_N.sync_host();
tensor_Softmax.sync_host();
double const kThreshold = options.tolerance;
// Verification checks - set any of these to 'true' to override the verification checks.
bool verified_D = false;
bool verified_N = false;
bool verified_Softmax = false;
// Verify softmax output
if (!verified_D) {
double norm_diff = cutlass::reference::host::TensorNormDiff(
tensor_D.host_view(),
reference_D.host_view());
double norm_reference = cutlass::reference::host::TensorNorm(
reference_D.host_view());
double rel_error = norm_diff / norm_reference;
if (rel_error > kThreshold) {
std::cerr << "\n\nTensor D Relative error: " << rel_error << std::endl;
}
else {
verified_D = true;
}
}
if (!verified_N) {
double norm_diff = cutlass::reference::host::TensorNormDiff(
tensor_N.host_view(),
reference_N.host_view());
double norm_reference = cutlass::reference::host::TensorNorm(
reference_N.host_view());
double rel_error = norm_diff / norm_reference;
if (rel_error > kThreshold) {
std::cerr << "\n\nTensor N Relative error: " << rel_error << std::endl;
}
else {
verified_N = true;
}
}
if (!verified_Softmax) {
double norm_diff = cutlass::reference::host::TensorNormDiff(
tensor_Softmax.host_view(),
reference_Softmax.host_view());
double norm_reference = cutlass::reference::host::TensorNorm(
reference_Softmax.host_view());
double rel_error = norm_diff / norm_reference;
if (rel_error > kThreshold) {
std::cerr << "\n\nSoftmax Relative error: " << rel_error << std::endl;
}
else {
verified_Softmax = true;
}
}
if (!verified_D || !verified_N || !verified_Softmax) {
std::cerr << "Verification check failed for tensor Softmax" << std::endl;
emit_results();
// Summarize which checks failed
if (!verified_D) {
std::cerr << "Verification of D tensor failed\n";
}
if (!verified_N) {
std::cerr << "Verification of N tensor failed\n";
}
if (!verified_Softmax) {
std::cerr << "Verification of Softmax tensor failed\n";
}
return false;
}
return true;
}
/// Profiles
bool profile() {
//
// Profile
//
cutlass::Status status = cutlass::Status::kSuccess;
cudaError_t result;
cudaEvent_t events[2];
int const kIterations = options.iterations;
for (cudaEvent_t &evt : events) {
result = cudaEventCreate(&evt);
if (result != cudaSuccess) {
std::cerr << "cudaEventCreate failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
}
result = cudaEventRecord(events[0]);
if (result != cudaSuccess) {
std::cerr << "cudaEventRecord() failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
for (int iter = 0; iter < kIterations; ++iter) {
status = execute_device_kernel();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Device execution failed." << std::endl;
return false;
}
}
result = cudaEventRecord(events[1]);
if (result != cudaSuccess) {
std::cerr << "cudaEventRecord() failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "cudaDeviceSynchronize() failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
float elapsed_ms = 0;
result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]);
if (result != cudaSuccess) {
std::cerr << "cudaEventElapsedTime() failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
for (cudaEvent_t &evt : events) {
result = cudaEventDestroy(evt);
if (result != cudaSuccess) {
std::cerr << "cudaEventDestroy() failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
}
int64_t flops = int64_t(options.problem_size.m()) * options.problem_size.n() * options.problem_size.k() * 2;
int64_t bytes = (sizeof(ElementD) * 2 + sizeof(ElementSoftmax)) * options.problem_size.m() * options.problem_size.n();
double gflops_per_second = double(flops) * kIterations / double(elapsed_ms / 1000.0f) / double(1.0e9);
double gbytes_per_second = double(bytes) * kIterations / double(elapsed_ms / 1000.0f) / double(1 << 30);
std::cout << " Problem: "
<< options.problem_size.m() << "-by-" << options.problem_size.n() << "-by-" << options.problem_size.k()
<< std::endl;
std::cout << " Runtime: " << elapsed_ms << " ms\n" << std::endl;
std::cout << " GFLOPs: " << gflops_per_second << " GFLOPs" << std::endl;
std::cout << "Memory bandwidth: " << gbytes_per_second << " GiB/s" << std::endl;
return true;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv) {
// Options parsing
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (!options.supported()) {
return 0;
}
// Run
Testbed testbed(options);
Disposition disposition = testbed.run();
std::cout << std::endl;
switch (disposition) {
case Disposition::kPassed:
std::cout << "Passed" << std::endl;
break;
case Disposition::kIncorrect:
std::cout << "Incorrect" << std::endl;
break;
case Disposition::kNotVerified:
std::cout << "Not verified" << std::endl;
break;
}
return (disposition == Disposition::kPassed ? 0 : -1);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
2b22996efad0e6e68ced37d2eedd3f6534c32426.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <iostream>
#include <time.h>
#define DIM1 3
#define DIM2 3
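// For every cell of the DIM1 x DIM2 grid, compute the mean of all in-bounds
// neighbours lying within a circle of the given radius (the cell itself
// included) and write it to the corresponding position of `out`.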
__global__ void avg(float* in, float* out, int radius)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < DIM1 * DIM2)
{
int x = tid / DIM1;
int y = tid % DIM2;
float count = 0;
float val = 0;
for(int i = -1 * radius; i <= radius; i++)
{
int nx = i + x;
if(nx >= 0 && nx < DIM1)
for(int j = -1 * radius; j <= radius; j++)
{
int ny = j + y;
if(i*i + j*j <= radius * radius && ny >= 0 && ny < DIM2)
val += in[nx * DIM1 + ny], count++;
}
}
out[tid] = val/count;
}
}
int main()
{
float* in = (float*)malloc(DIM1*DIM2*sizeof(float));
srand(time(NULL));
for(int i = 0; i < DIM1*DIM2; i++)
in[i] = rand()%1000;
/* std::cout << "Original:" << std::endl;
for(int i = 0; i < DIM1; ++i) {
for(int j = 0; j < DIM2; ++j)
std::cout << in[i*DIM1 + j] << " ";
std::cout << std::endl;
}*/
float* din;
float* dout;
hipMalloc((void**)&din, DIM1 * DIM2 * sizeof(float));
hipMalloc((void**)&dout, DIM1 * DIM2 * sizeof(float));
hipMemcpy(din, in, DIM1 * DIM2 * sizeof(float), hipMemcpyHostToDevice);
int TPB = 9;
hipLaunchKernelGGL(( avg), dim3((DIM1*DIM2 + TPB - 1)/TPB), dim3(TPB), 0, 0, din, dout, 2);
float* out = (float*)malloc(DIM1*DIM2*sizeof(float));
hipMemcpy(out, dout, DIM1 * DIM2 * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "Averaged:" << std::endl;
for(int x = 0; x < DIM1; x++)
{
if (x % 10 == 0)
{
for(int y = 0; y < DIM2; y++)
if(y % 10 == 0)
std::cout << out[x*DIM1+y] << " ";
std::cout << std::endl;
}
}
hipFree(din); hipFree(dout);
free(in); free(out);
}
| 2b22996efad0e6e68ced37d2eedd3f6534c32426.cu | #include <cstdlib>
#include <iostream>
#include <time.h>
#define DIM1 3
#define DIM2 3
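// For every cell of the DIM1 x DIM2 grid, compute the mean of all in-bounds
// neighbours lying within a circle of the given radius (the cell itself
// included) and write it to the corresponding position of `out`.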
__global__ void avg(float* in, float* out, int radius)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < DIM1 * DIM2)
{
int x = tid / DIM1;
int y = tid % DIM2;
float count = 0;
float val = 0;
for(int i = -1 * radius; i <= radius; i++)
{
int nx = i + x;
if(nx >= 0 && nx < DIM1)
for(int j = -1 * radius; j <= radius; j++)
{
int ny = j + y;
if(i*i + j*j <= radius * radius && ny >= 0 && ny < DIM2)
val += in[nx * DIM1 + ny], count++;
}
}
out[tid] = val/count;
}
}
int main()
{
float* in = (float*)malloc(DIM1*DIM2*sizeof(float));
srand(time(NULL));
for(int i = 0; i < DIM1*DIM2; i++)
in[i] = rand()%1000;
/* std::cout << "Original:" << std::endl;
for(int i = 0; i < DIM1; ++i) {
for(int j = 0; j < DIM2; ++j)
std::cout << in[i*DIM1 + j] << " ";
std::cout << std::endl;
}*/
float* din;
float* dout;
cudaMalloc((void**)&din, DIM1 * DIM2 * sizeof(float));
cudaMalloc((void**)&dout, DIM1 * DIM2 * sizeof(float));
cudaMemcpy(din, in, DIM1 * DIM2 * sizeof(float), cudaMemcpyHostToDevice);
int TPB = 9;
avg<<<(DIM1*DIM2 + TPB - 1)/TPB, TPB>>>(din, dout, 2);
float* out = (float*)malloc(DIM1*DIM2*sizeof(float));
cudaMemcpy(out, dout, DIM1 * DIM2 * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Averaged:" << std::endl;
for(int x = 0; x < DIM1; x++)
{
if (x % 10 == 0)
{
for(int y = 0; y < DIM2; y++)
if(y % 10 == 0)
std::cout << out[x*DIM1+y] << " ";
std::cout << std::endl;
}
}
cudaFree(din); cudaFree(dout);
free(in); free(out);
}
|
9c1bbd8b48d1731d9180f6e797600c750804f32c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
//---------------------------------------------------------------------------------
static const int N = 10; //Number of rows and columns
using namespace std;
//---------------------------------------------------------------------------------
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//---------------------------------------------------------------------------------
__global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int n) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate matrixTranspose kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
int i = threadIdx.x;
int j = threadIdx.y;
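// Note: indices come from threadIdx only, so this kernel assumes the whole
// n x n matrix is covered by a single thread block (it is launched below
// with dimGrid(1,1,1) and dimBlock(10,10,1)).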
if(i<n&&j<n)
T_d[i+j*n] = A_d[j+i*n];
}
//---------------------------------------------------------------------------------
int main(void) {
unsigned int **A ;
unsigned int **T ;
unsigned int *A_h;
unsigned int *A_d;
unsigned int *T_h;
unsigned int *T_d;
//Set Device
CUDA_CHECK_RETURN(hipSetDevice(0));
//Seed the random number generator
srand(time(NULL));
//Clear command prompt
cout << "\033[2J\033[1;1H";
cout << "Allocating arrays on host ... ";
A_h = new unsigned int[N*N];
T_h = new unsigned int[N*N];
A = new unsigned int* [N];
for (int i = 0; i < N; ++i) {
A[i] = new unsigned int[N];
}
T = new unsigned int* [N];
for (int i = 0; i < N; ++i) {
T[i] = new unsigned int[N];
}
cout << "done.\nPopluating input matrix on host ...";
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
A[i][j] = rand();
}
}
cout << "done.\nConverting 2-dimensional input matrix to 1-dimensional array on host ... ";
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Add code for converting 2-dimensional input matrix to 1-dimensional array here ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
A_h[i*N+j] = A[i][j];
}
}
cout << "done.\nAllocating arrays on device ... ";
CUDA_CHECK_RETURN(
hipMalloc((void** ) &A_d, sizeof(unsigned int) * N*N));
CUDA_CHECK_RETURN(
hipMalloc((void** ) &T_d, sizeof(unsigned int) * N*N));
cout << "done.\nCopying arrays from host to device ... ";
CUDA_CHECK_RETURN(
hipMemcpy(A_d, A_h, sizeof(int) * N*N,
hipMemcpyHostToDevice));
cout << "done.\nLaunching kernel ... ";
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** define kernel launch parameters ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
dim3 dimGrid( 1,1, 1);
dim3 dimBlock(10,10, 1);
//Time kernel launch
//Time kernel launch
hipEvent_t start, stop;
CUDA_CHECK_RETURN(hipEventCreate(&start));
CUDA_CHECK_RETURN(hipEventCreate(&stop));
float elapsedTime;
CUDA_CHECK_RETURN(hipEventRecord(start, 0));
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Add kernel call here ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
hipLaunchKernelGGL(( matrixTranspose), dim3(dimGrid), dim3(dimBlock) , 0, 0, A_d, T_d, N);
CUDA_CHECK_RETURN(hipEventRecord(stop, 0));
CUDA_CHECK_RETURN(hipEventSynchronize(stop));
CUDA_CHECK_RETURN(hipEventElapsedTime(&elapsedTime, start, stop));
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError()); //Check if an error occurred in device code
CUDA_CHECK_RETURN(hipEventDestroy(start));
CUDA_CHECK_RETURN(hipEventDestroy(stop));
cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
cout << "Copying results back to host .... ";
CUDA_CHECK_RETURN(
hipMemcpy(T_h, T_d, sizeof(int) * N*N,
hipMemcpyDeviceToHost));
cout << "done.\nConverting 1-dimensional output array to 2-dimensional matrix on host ... ";
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Add code for converting 1-dimensional output array to 2-dimensional matrix here ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
T[i][j] =T_h[i*N+j] ;
}
}
cout << "done.\nVerifying results on host ...";
//Add code to time host calculations
clock_t st, ed;
st = clock();
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Check that results from kernel are correct ****
// **** Complete validation code below ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
bool valid = true;
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
if(T[i][j] != A[j][i])
{
cout << "\n\ndone.\n***GPU results are incorrect***";
valid = false;
break;
}
}
if(!valid){
break;
}
}
cout << "done\n";
if (valid) {
cout << "GPU results are valid.\n";
}
ed = clock() - st;
cout << "Elapsed time on host: " << ((float) ed) / CLOCKS_PER_SEC * 1000
<< " ms" << endl;
cout << "Freeing memory on device ... ";
CUDA_CHECK_RETURN(hipFree((void* ) A_d));
CUDA_CHECK_RETURN(hipFree((void* ) T_d));
CUDA_CHECK_RETURN(hipDeviceReset());
cout << "done.\nFreeing memory on host ... ";
delete[] A_h;
delete[] T_h;
for (int i = 0; i < N; ++i) {
delete[] A[i];
}
delete[] A;
for (int i = 0; i < N; ++i) {
delete[] T[i];
}
delete[] T;
cout << "done.\nExiting program.\n";
cout<<" Kushagra Trivedi\n 3080669\n";
return 0;
}
| 9c1bbd8b48d1731d9180f6e797600c750804f32c.cu | //---------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
//---------------------------------------------------------------------------------
static const int N = 10; //Number of rows and columns
using namespace std;
//---------------------------------------------------------------------------------
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//---------------------------------------------------------------------------------
__global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int n) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate matrixTranspose kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
int i = threadIdx.x;
int j = threadIdx.y;
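// Note: indices come from threadIdx only, so this kernel assumes the whole
// n x n matrix is covered by a single thread block (it is launched below
// with dimGrid(1,1,1) and dimBlock(10,10,1)).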
if(i<n&&j<n)
T_d[i+j*n] = A_d[j+i*n];
}
//---------------------------------------------------------------------------------
int main(void) {
unsigned int **A ;
unsigned int **T ;
unsigned int *A_h;
unsigned int *A_d;
unsigned int *T_h;
unsigned int *T_d;
//Set Device
CUDA_CHECK_RETURN(cudaSetDevice(0));
//Seed the random number generator
srand(time(NULL));
//Clear command prompt
cout << "\033[2J\033[1;1H";
cout << "Allocating arrays on host ... ";
A_h = new unsigned int[N*N];
T_h = new unsigned int[N*N];
A = new unsigned int* [N];
for (int i = 0; i < N; ++i) {
A[i] = new unsigned int[N];
}
T = new unsigned int* [N];
for (int i = 0; i < N; ++i) {
T[i] = new unsigned int[N];
}
cout << "done.\nPopluating input matrix on host ...";
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
A[i][j] = rand();
}
}
cout << "done.\nConverting 2-dimensional input matrix to 1-dimensional array on host ... ";
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Add code for converting 2-dimensional input matrix to 1-dimensional array here ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
A_h[i*N+j] = A[i][j];
}
}
cout << "done.\nAllocating arrays on device ... ";
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &A_d, sizeof(unsigned int) * N*N));
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &T_d, sizeof(unsigned int) * N*N));
cout << "done.\nCopying arrays from host to device ... ";
CUDA_CHECK_RETURN(
cudaMemcpy(A_d, A_h, sizeof(int) * N*N,
cudaMemcpyHostToDevice));
cout << "done.\nLaunching kernel ... ";
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** define kernel launch parameters ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
dim3 dimGrid( 1,1, 1);
dim3 dimBlock(10,10, 1);
//Time kernel launch
cudaEvent_t start, stop;
CUDA_CHECK_RETURN(cudaEventCreate(&start));
CUDA_CHECK_RETURN(cudaEventCreate(&stop));
float elapsedTime;
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Add kernel call here ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
matrixTranspose<<< dimGrid, dimBlock >>>(A_d, T_d, N);
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError()); //Check if an error occurred in device code
CUDA_CHECK_RETURN(cudaEventDestroy(start));
CUDA_CHECK_RETURN(cudaEventDestroy(stop));
cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
cout << "Copying results back to host .... ";
CUDA_CHECK_RETURN(
cudaMemcpy(T_h, T_d, sizeof(int) * N*N,
cudaMemcpyDeviceToHost));
cout << "done.\nConverting 1-dimensional output array to 2-dimensional matrix on host ... ";
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Add code for converting 1-dimensional output array to 2-dimensional matrix here ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
T[i][j] = T_h[i*N+j];
}
}
cout << "done.\nVerifying results on host ...";
//Add code to time host calculations
clock_t st, ed;
st = clock();
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Check that results from kernel are correct ****
// **** Complete validation code below ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
bool valid = true;
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
if(T[i][j] != A[j][i])
{
cout << "\n\ndone.\n***GPU results are incorrect***";
valid = false;
break;
}
}
if(!valid){
break;
}
}
cout << "done\n";
if (valid) {
cout << "GPU results are valid.\n";
}
ed = clock() - st;
cout << "Elapsed time on host: " << ((float) ed) / CLOCKS_PER_SEC * 1000
<< " ms" << endl;
cout << "Freeing memory on device ... ";
CUDA_CHECK_RETURN(cudaFree((void* ) A_d));
CUDA_CHECK_RETURN(cudaFree((void* ) T_d));
CUDA_CHECK_RETURN(cudaDeviceReset());
cout << "done.\nFreeing memory on host ... ";
delete[] A_h;
delete[] T_h;
for (int i = 0; i < N; ++i) {
delete[] A[i];
}
delete[] A;
for (int i = 0; i < N; ++i) {
delete[] T[i];
}
delete[] T;
cout << "done.\nExiting program.\n";
cout<<" Kushagra Trivedi\n 3080669\n";
return 0;
}
|
1537f77a400cd0148d5bad6c3b0753cf2f6d0c4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void findMaxInAccum(unsigned int* accum, int w_accum, int h_accum, int* dev_points, int* max)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int tid = y * w_accum + x;
if (x >= w_accum || y >= h_accum)
return;
int old = (int)accum[tid];
atomicMax(&max[0], (int)accum[tid]);
if (old == max[0]) {
atomicExch(&dev_points[0], x);
atomicExch(&dev_points[1], y);
}
return;
} | 1537f77a400cd0148d5bad6c3b0753cf2f6d0c4b.cu | __global__ void findMaxInAccum(unsigned int* accum, int w_accum, int h_accum, int* dev_points, int* max)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int tid = y * w_accum + x;
if (x >= w_accum || y >= h_accum)
return;
int old = (int)accum[tid];
atomicMax(&max[0], (int)accum[tid]);
if (old == max[0]) {
atomicExch(&dev_points[0], x);
atomicExch(&dev_points[1], y);
}
return;
} |
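// Editor's note (annotation, not part of the dataset row above): findMaxInAccum is
// racy -- max[0] can still grow after the "old == max[0]" comparison, and dev_points[0]
// and dev_points[1] are written by two independent atomicExch calls, so the stored x
// and y can come from different threads when several pixels tie for the maximum.
// A common race-free alternative packs (value, index) into one 64-bit word so a single
// atomicMax decides both at once. Hypothetical sketch (needs sm_35+ for the 64-bit
// atomicMax; "findMaxInAccumPacked" and "best" are invented names):
__global__ void findMaxInAccumPacked(const unsigned int* accum, int w_accum, int h_accum,
                                     unsigned long long* best /* initialise to 0 on host */)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= w_accum || y >= h_accum)
        return;
    int tid = y * w_accum + x;
    // high 32 bits: accumulator value, low 32 bits: flat pixel index (ties -> larger index wins)
    unsigned long long packed = ((unsigned long long)accum[tid] << 32) | (unsigned int)tid;
    atomicMax(best, packed);
}
// Host side afterwards: value = *best >> 32, tid = (unsigned)*best,
//                       x = tid % w_accum, y = tid / w_accum.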
dc6495889ab8556a0dbf6e533da16e3a6fecbbf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_functions.h"
#include "cuda_globals.h"
__device__ myprec d_phi[mx*my*mz];
__device__ myprec d_rhs1[mx*my*mz];
__device__ myprec d_rhs2[mx*my*mz];
__device__ myprec d_rhs3[mx*my*mz];
__device__ myprec d_rhs4[mx*my*mz];
__device__ myprec d_temp[mx*my*mz];
__device__ myprec d_tmp[mx*my*mz];
__device__ void RHSDevice(myprec *rhs, myprec *var, Indices id) {
#if parentGrid==0
derDev1x(rhs,var,id);
#elif parentGrid==1
derDev1y(rhs,var,id);
#else
derDev1z(rhs,var,id);
#endif
rhs[id.g] = -rhs[id.g]*U;
}
__device__ void rk4Device(Indices id) {
RHSDevice(d_rhs1,d_phi,id);
d_temp[id.g] = (d_phi[id.g] + d_rhs1[id.g]*d_dt/2);
RHSDevice(d_rhs2,d_temp,id);
d_temp[id.g] = (d_phi[id.g] + d_rhs2[id.g]*d_dt/2);
RHSDevice(d_rhs3,d_temp,id);
d_temp[id.g] = (d_phi[id.g] + d_rhs3[id.g]*d_dt);
RHSDevice(d_rhs4,d_temp,id);
d_phi[id.g] = d_phi[id.g] + d_dt*
( d_rhs1[id.g] +
2*d_rhs2[id.g] +
2*d_rhs3[id.g] +
d_rhs4[id.g])/6.;
}
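// Editor's note: rk4Device above is the classical fourth-order Runge-Kutta step for
// the linear advection equation d(phi)/dt = -U * d(phi)/dxi (xi = x, y or z depending
// on parentGrid), i.e. with f(phi) = -U * derivative(phi):
//   k1 = f(phi_n), k2 = f(phi_n + dt/2*k1), k3 = f(phi_n + dt/2*k2), k4 = f(phi_n + dt*k3)
//   phi_{n+1} = phi_n + dt/6 * (k1 + 2*k2 + 2*k3 + k4)
// d_rhs1..d_rhs4 hold k1..k4 and d_temp holds the intermediate states.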
__global__ void runDevice() {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
#if parentGrid==0
id.mkidX();
if(id.g==0) {
printf("\n");
printf("Using X-Grid\n");
printf("Grid: {%d %d %d}. Blocks: {%d %d %d}.\n",gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
printf("\n");
}
#elif parentGrid==1
id.mkidY();
if(id.g==0) {
printf("\n");
printf("Using Y-Grid\n");
printf("Grid: {%d %d %d}. Blocks: {%d %d %d}.\n",gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
printf("\n");
}
#else
id.mkidZ();
if(id.g==0) {
printf("\n");
printf("Using Z-Grid\n");
printf("Grid: {%d %d %d}. Blocks: {%d %d %d}.\n",gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
printf("\n");
}
#endif
for (int istep=0; istep < nsteps; istep++) {
rk4Device(id);
}
}
| dc6495889ab8556a0dbf6e533da16e3a6fecbbf4.cu | #include "cuda_functions.h"
#include "cuda_globals.h"
__device__ myprec d_phi[mx*my*mz];
__device__ myprec d_rhs1[mx*my*mz];
__device__ myprec d_rhs2[mx*my*mz];
__device__ myprec d_rhs3[mx*my*mz];
__device__ myprec d_rhs4[mx*my*mz];
__device__ myprec d_temp[mx*my*mz];
__device__ myprec d_tmp[mx*my*mz];
__device__ void RHSDevice(myprec *rhs, myprec *var, Indices id) {
#if parentGrid==0
derDev1x(rhs,var,id);
#elif parentGrid==1
derDev1y(rhs,var,id);
#else
derDev1z(rhs,var,id);
#endif
rhs[id.g] = -rhs[id.g]*U;
}
__device__ void rk4Device(Indices id) {
RHSDevice(d_rhs1,d_phi,id);
d_temp[id.g] = (d_phi[id.g] + d_rhs1[id.g]*d_dt/2);
RHSDevice(d_rhs2,d_temp,id);
d_temp[id.g] = (d_phi[id.g] + d_rhs2[id.g]*d_dt/2);
RHSDevice(d_rhs3,d_temp,id);
d_temp[id.g] = (d_phi[id.g] + d_rhs3[id.g]*d_dt);
RHSDevice(d_rhs4,d_temp,id);
d_phi[id.g] = d_phi[id.g] + d_dt*
( d_rhs1[id.g] +
2*d_rhs2[id.g] +
2*d_rhs3[id.g] +
d_rhs4[id.g])/6.;
}
__global__ void runDevice() {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
#if parentGrid==0
id.mkidX();
if(id.g==0) {
printf("\n");
printf("Using X-Grid\n");
printf("Grid: {%d %d %d}. Blocks: {%d %d %d}.\n",gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
printf("\n");
}
#elif parentGrid==1
id.mkidY();
if(id.g==0) {
printf("\n");
printf("Using Y-Grid\n");
printf("Grid: {%d %d %d}. Blocks: {%d %d %d}.\n",gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
printf("\n");
}
#else
id.mkidZ();
if(id.g==0) {
printf("\n");
printf("Using Z-Grid\n");
printf("Grid: {%d %d %d}. Blocks: {%d %d %d}.\n",gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
printf("\n");
}
#endif
for (int istep=0; istep < nsteps; istep++) {
rk4Device(id);
}
}
|
04146526c647eb01d286d455168d58ffbf8ab477.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cassert>
__device__ float saru(unsigned int seed1, unsigned int seed2, unsigned int seed3)
{
seed3 ^= (seed1<<7)^(seed2>>6);
seed2 += (seed1>>4)^(seed3>>15);
seed1 ^= (seed2<<9)+(seed3<<8);
seed3 ^= 0xA5366B4D*((seed2>>11) ^ (seed1<<1));
seed2 += 0x72BE1579*((seed1<<4) ^ (seed3>>16));
seed1 ^= 0X3F38A6ED*((seed3>>5) ^ (((signed int)seed2)>>22));
seed2 += seed1*seed3;
seed1 += seed3 ^ (seed2>>2);
seed2 ^= ((signed int)seed2)>>17;
int state = 0x79dedea3*(seed1^(((signed int)seed1)>>14));
int wstate = (state + seed2) ^ (((signed int)state)>>8);
state = state + (wstate*(wstate^0xdddf97f5));
wstate = 0xABCB96F7 + (wstate>>1);
state = 0x4beb5d59*state + 0x2600e1f7; // LCG
wstate = wstate + 0x8009d14b + ((((signed int)wstate)>>31)&0xda879add); // OWS
unsigned int v = (state ^ (state>>26))+wstate;
unsigned int r = (v^(v>>20))*0x6957f5a7;
double res = r / (4294967295.0f);
return res;
}
#ifndef NDEBUG
#define _CHECK_
#endif
struct InfoDPD
{
int3 ncells;
int np;
float3 domainsize, invdomainsize, domainstart;
float invrc, aij, gamma, sigmaf;
float *xyzuvw, *axayaz;
};
__constant__ InfoDPD info;
texture<float2, hipTextureType1D> texParticles;
texture<int, hipTextureType1D> texStart, texCount;
#define COLS 8
#define ROWS (32 / COLS)
#define _XCPB_ 2
#define _YCPB_ 2
#define _ZCPB_ 1
#define CPB (_XCPB_ * _YCPB_ * _ZCPB_)
__global__ __launch_bounds__(32 * CPB, 16)
void _dpd_forces_saru(int idtimestep)
{
assert(warpSize == COLS * ROWS);
assert(blockDim.x == warpSize && blockDim.y == CPB && blockDim.z == 1);
assert(ROWS * 3 <= warpSize);
const int tid = threadIdx.x;
const int subtid = tid % COLS;
const int slot = tid / COLS;
const int wid = threadIdx.y;
__shared__ int volatile starts[CPB][32], scan[CPB][32];
int mycount = 0;
if (tid < 27)
{
const int dx = (1 + tid) % 3;
const int dy = (1 + (tid / 3)) % 3;
const int dz = (1 + (tid / 9)) % 3;
const int xcid = (blockIdx.x * _XCPB_ + ((threadIdx.y) % _XCPB_) + dx - 1 + info.ncells.x) % info.ncells.x;
const int ycid = (blockIdx.y * _YCPB_ + ((threadIdx.y / _XCPB_) % _YCPB_) + dy - 1 + info.ncells.y) % info.ncells.y;
const int zcid = (blockIdx.z * _ZCPB_ + ((threadIdx.y / (_XCPB_ * _YCPB_)) % _ZCPB_) + dz - 1 + info.ncells.z) % info.ncells.z;
const int cid = xcid + info.ncells.x * (ycid + info.ncells.y * zcid);
starts[wid][tid] = tex1Dfetch(texStart, cid);
mycount = tex1Dfetch(texCount, cid);
}
for(int L = 1; L < 32; L <<= 1)
mycount += (tid >= L) * __shfl_up(mycount, L) ;
if (tid < 27)
scan[wid][tid] = mycount;
const int dststart = starts[wid][0];
const int nsrc = scan[wid][26], ndst = scan[wid][0];
for(int d = 0; d < ndst; d += ROWS)
{
const int np1 = min(ndst - d, ROWS);
const int dpid = dststart + d + slot;
const int entry = 3 * dpid;
float2 dtmp0 = tex1Dfetch(texParticles, entry);
float2 dtmp1 = tex1Dfetch(texParticles, entry + 1);
float2 dtmp2 = tex1Dfetch(texParticles, entry + 2);
float f[3] = {0, 0, 0};
for(int s = 0; s < nsrc; s += COLS)
{
const int np2 = min(nsrc - s, COLS);
const int pid = s + subtid;
const int key9 = 9 * (pid >= scan[wid][8]) + 9 * (pid >= scan[wid][17]);
const int key3 = 3 * (pid >= scan[wid][key9 + 2]) + 3 * (pid >= scan[wid][key9 + 5]);
const int key1 = (pid >= scan[wid][key9 + key3]) + (pid >= scan[wid][key9 + key3 + 1]);
const int key = key9 + key3 + key1;
assert(subtid >= np2 || pid >= (key ? scan[wid][key - 1] : 0) && pid < scan[wid][key]);
const int spid = starts[wid][key] + pid - (key ? scan[wid][key - 1] : 0);
const int sentry = 3 * spid;
const float2 stmp0 = tex1Dfetch(texParticles, sentry);
const float2 stmp1 = tex1Dfetch(texParticles, sentry + 1);
const float2 stmp2 = tex1Dfetch(texParticles, sentry + 2);
{
const float xforce = f[0];
const float yforce = f[1];
const float zforce = f[2];
const float xdiff = dtmp0.x - stmp0.x;
const float ydiff = dtmp0.y - stmp0.y;
const float zdiff = dtmp1.x - stmp1.x;
const float _xr = xdiff - info.domainsize.x * floorf(0.5f + xdiff * info.invdomainsize.x);
const float _yr = ydiff - info.domainsize.y * floorf(0.5f + ydiff * info.invdomainsize.y);
const float _zr = zdiff - info.domainsize.z * floorf(0.5f + zdiff * info.invdomainsize.z);
const float rij2 = _xr * _xr + _yr * _yr + _zr * _zr;
const float invrij = rsqrtf(rij2);
const float rij = rij2 * invrij;
const float wr = max((float)0, 1 - rij * info.invrc);
const float xr = _xr * invrij;
const float yr = _yr * invrij;
const float zr = _zr * invrij;
const float rdotv =
xr * (dtmp1.y - stmp1.y) +
yr * (dtmp2.x - stmp2.x) +
zr * (dtmp2.y - stmp2.y);
const float mysaru = saru(min(spid, dpid), max(spid, dpid), idtimestep);
const float myrandnr = 3.464101615f * mysaru - 1.732050807f;
const float strength = (info.aij - info.gamma * wr * rdotv + info.sigmaf * myrandnr) * wr;
const bool valid = (d + slot != s + subtid) && (slot < np1) && (subtid < np2);
if (valid)
{
#ifdef _CHECK_
f[0] = xforce + (rij2 < 1);
f[1] = yforce + wr;
f[2] = zforce + 0;
#else
f[0] = xforce + strength * xr;
f[1] = yforce + strength * yr;
f[2] = zforce + strength * zr;
#endif
}
}
}
for(int L = COLS / 2; L > 0; L >>=1)
for(int c = 0; c < 3; ++c)
f[c] += __shfl_xor(f[c], L);
const float fcontrib = f[subtid % 3];
const int dstpid = dststart + d + slot;
const int c = (subtid % 3);
if (slot < np1)
info.axayaz[c + 3 * dstpid] = fcontrib;
}
}
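// Editor's note: the line "mycount += (tid >= L) * __shfl_up(mycount, L)" in the kernel
// above is the standard warp-level inclusive prefix sum (here over the 27 neighbour-cell
// counts). A minimal standalone sketch of the same idiom, written with the modern *_sync
// intrinsic and assuming a full 32-lane warp -- illustration only, not code from this file:
__device__ int warpInclusiveScan(int v) {
    for (int offset = 1; offset < 32; offset <<= 1) {
        int up = __shfl_up_sync(0xffffffffu, v, offset); // value held by lane (lane - offset)
        if ((threadIdx.x & 31) >= offset)                // lanes below 'offset' have no source lane
            v += up;
    }
    return v; // lane i now holds v_0 + v_1 + ... + v_i within its warp
}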
#include <cmath>
#include <unistd.h>
#include <thrust/device_vector.h>
using namespace thrust;
#include "profiler-dpd.h"
#include "cell-lists.h"
#define CUDA_CHECK(ans) do { cudaAssert((ans), __FILE__, __LINE__); } while(0)
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
sleep(5);
if (abort) exit(code);
}
}
template<typename T> T * _ptr(device_vector<T>& v) { return raw_pointer_cast(v.data()); }
ProfilerDPD * myprof = NULL;
void forces_dpd_cuda(float * const _xyzuvw, float * const _axayaz,
int * const order, const int np,
const float rc,
const float XL, const float YL, const float ZL,
const float aij,
const float gamma,
const float sigma,
const float invsqrtdt)
{
if (myprof == NULL)
myprof = new ProfilerDPD();
int nx = (int)ceil(XL / rc);
int ny = (int)ceil(YL / rc);
int nz = (int)ceil(ZL / rc);
const int ncells = nx * ny * nz;
device_vector<float> xyzuvw(_xyzuvw, _xyzuvw + np * 6), axayaz(np * 3);
InfoDPD c;
c.ncells = make_int3(nx, ny, nz);
c.np = np;
c.domainsize = make_float3(XL, YL, ZL);
c.invdomainsize = make_float3(1 / XL, 1 / YL, 1 / ZL);
c.domainstart = make_float3(-XL * 0.5, -YL * 0.5, -ZL * 0.5);
c.invrc = 1.f / rc;
c.aij = aij;
c.gamma = gamma;
c.sigmaf = sigma * invsqrtdt;
c.xyzuvw = _ptr(xyzuvw);
c.axayaz = _ptr(axayaz);
CUDA_CHECK(hipMemcpyToSymbol(info, &c, sizeof(c)));
device_vector<int> starts(ncells), ends(ncells);
build_clists(_ptr(xyzuvw), np, rc, c.ncells.x, c.ncells.y, c.ncells.z,
c.domainstart.x, c.domainstart.y, c.domainstart.z,
order, _ptr(starts), _ptr(ends));
{
size_t textureoffset = 0;
hipChannelFormatDesc fmt = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindSigned);
texStart.channelDesc = fmt;
texStart.filterMode = hipFilterModePoint;
texStart.mipmapFilterMode = hipFilterModePoint;
texStart.normalized = 0;
hipBindTexture(&textureoffset, &texStart, _ptr(starts), &fmt, sizeof(int) * (ncells));
texCount.channelDesc = fmt;
texCount.filterMode = hipFilterModePoint;
texCount.mipmapFilterMode = hipFilterModePoint;
texCount.normalized = 0;
hipBindTexture(&textureoffset, &texCount, _ptr(ends), &fmt, sizeof(int) * (ncells));
fmt = hipCreateChannelDesc<float2>();
texParticles.channelDesc = fmt;
texParticles.filterMode = hipFilterModePoint;
texParticles.mipmapFilterMode = hipFilterModePoint;
texParticles.normalized = 0;
hipBindTexture(&textureoffset, &texParticles, c.xyzuvw, &fmt, sizeof(float) * 6 * np);
}
myprof->start();
static int tid = 0;
hipLaunchKernelGGL(( _dpd_forces_saru), dim3(c.ncells.x / _XCPB_,
c.ncells.y / _YCPB_,
c.ncells.z / _ZCPB_), dim3(dim3(32, CPB)), 0, 0, tid);
++tid;
CUDA_CHECK(hipPeekAtLastError());
myprof->force();
myprof->report();
copy(xyzuvw.begin(), xyzuvw.end(), _xyzuvw);
copy(axayaz.begin(), axayaz.end(), _axayaz);
#ifdef _CHECK_
CUDA_CHECK(hipDeviceSynchronize());
for(int i = 0; i < np; ++i)
{
printf("pid %d -> %f %f %f\n", i, (float)axayaz[0 + 3 * i], (float)axayaz[1 + 3* i], (float)axayaz[2 + 3 *i]);
int cnt = 0;
float fc = 0;
printf("devi coords are %f %f %f\n", (float)xyzuvw[0 + 6 * i], (float)xyzuvw[1 + 6 * i], (float)xyzuvw[2 + 6 * i]);
printf("host coords are %f %f %f\n", (float)_xyzuvw[0 + 6 * i], (float)_xyzuvw[1 + 6 * i], (float)_xyzuvw[2 + 6 * i]);
for(int j = 0; j < np; ++j)
{
if (i == j)
continue;
float xr = _xyzuvw[0 + 6 *i] - _xyzuvw[0 + 6 * j];
float yr = _xyzuvw[1 + 6 *i] - _xyzuvw[1 + 6 * j];
float zr = _xyzuvw[2 + 6 *i] - _xyzuvw[2 + 6 * j];
xr -= c.domainsize.x * ::floor(0.5f + xr / c.domainsize.x);
yr -= c.domainsize.y * ::floor(0.5f + yr / c.domainsize.y);
zr -= c.domainsize.z * ::floor(0.5f + zr / c.domainsize.z);
const float rij2 = xr * xr + yr * yr + zr * zr;
const float invrij = rsqrtf(rij2);
const float rij = rij2 * invrij;
const float wr = max((float)0, 1 - rij * c.invrc);
const bool collision = rij2 < 1;
if (collision)
fc += wr;// printf("ref p %d colliding with %d\n", i, j);
cnt += collision;
}
printf("i found %d host interactions and with cuda i found %d\n", cnt, (int)axayaz[0 + 3 * i]);
assert(cnt == (float)axayaz[0 + 3 * i]);
printf("fc aij ref %f vs cuda %e\n", fc, (float)axayaz[1 + 3 * i]);
assert(fabs(fc - (float)axayaz[1 + 3 * i]) < 1e-4);
}
printf("test done.\n");
sleep(1);
exit(0);
#endif
}
void forces_dpd_cuda(float * const xp, float * const yp, float * const zp,
float * const xv, float * const yv, float * const zv,
float * const xa, float * const ya, float * const za,
int * const order, const int np,
const float rc,
const float LX, const float LY, const float LZ,
const float aij,
const float gamma,
const float sigma,
const float invsqrtdt)
{
float * pv = new float[6 * np];
for(int i = 0; i < np; ++i)
{
pv[0 + 6 * i] = xp[i];
pv[1 + 6 * i] = yp[i];
pv[2 + 6 * i] = zp[i];
pv[3 + 6 * i] = xv[i];
pv[4 + 6 * i] = yv[i];
pv[5 + 6 * i] = zv[i];
}
float * a = new float[3 * np];
forces_dpd_cuda(pv, a, order, np, rc, LX, LY, LZ,
aij, gamma, sigma, invsqrtdt);
for(int i = 0; i < np; ++i)
{
xp[i] = pv[0 + 6 * i];
yp[i] = pv[1 + 6 * i];
zp[i] = pv[2 + 6 * i];
xv[i] = pv[3 + 6 * i];
yv[i] = pv[4 + 6 * i];
zv[i] = pv[5 + 6 * i];
}
delete [] pv;
for(int i = 0; i < np; ++i)
{
xa[i] = a[0 + 3 * i];
ya[i] = a[1 + 3 * i];
za[i] = a[2 + 3 * i];
}
delete [] a;
} | 04146526c647eb01d286d455168d58ffbf8ab477.cu | #include <cstdio>
#include <cassert>
__device__ float saru(unsigned int seed1, unsigned int seed2, unsigned int seed3)
{
seed3 ^= (seed1<<7)^(seed2>>6);
seed2 += (seed1>>4)^(seed3>>15);
seed1 ^= (seed2<<9)+(seed3<<8);
seed3 ^= 0xA5366B4D*((seed2>>11) ^ (seed1<<1));
seed2 += 0x72BE1579*((seed1<<4) ^ (seed3>>16));
seed1 ^= 0X3F38A6ED*((seed3>>5) ^ (((signed int)seed2)>>22));
seed2 += seed1*seed3;
seed1 += seed3 ^ (seed2>>2);
seed2 ^= ((signed int)seed2)>>17;
int state = 0x79dedea3*(seed1^(((signed int)seed1)>>14));
int wstate = (state + seed2) ^ (((signed int)state)>>8);
state = state + (wstate*(wstate^0xdddf97f5));
wstate = 0xABCB96F7 + (wstate>>1);
state = 0x4beb5d59*state + 0x2600e1f7; // LCG
wstate = wstate + 0x8009d14b + ((((signed int)wstate)>>31)&0xda879add); // OWS
unsigned int v = (state ^ (state>>26))+wstate;
unsigned int r = (v^(v>>20))*0x6957f5a7;
double res = r / (4294967295.0f);
return res;
}
#ifndef NDEBUG
#define _CHECK_
#endif
struct InfoDPD
{
int3 ncells;
int np;
float3 domainsize, invdomainsize, domainstart;
float invrc, aij, gamma, sigmaf;
float *xyzuvw, *axayaz;
};
__constant__ InfoDPD info;
texture<float2, cudaTextureType1D> texParticles;
texture<int, cudaTextureType1D> texStart, texCount;
#define COLS 8
#define ROWS (32 / COLS)
#define _XCPB_ 2
#define _YCPB_ 2
#define _ZCPB_ 1
#define CPB (_XCPB_ * _YCPB_ * _ZCPB_)
__global__ __launch_bounds__(32 * CPB, 16)
void _dpd_forces_saru(int idtimestep)
{
assert(warpSize == COLS * ROWS);
assert(blockDim.x == warpSize && blockDim.y == CPB && blockDim.z == 1);
assert(ROWS * 3 <= warpSize);
const int tid = threadIdx.x;
const int subtid = tid % COLS;
const int slot = tid / COLS;
const int wid = threadIdx.y;
__shared__ int volatile starts[CPB][32], scan[CPB][32];
int mycount = 0;
if (tid < 27)
{
const int dx = (1 + tid) % 3;
const int dy = (1 + (tid / 3)) % 3;
const int dz = (1 + (tid / 9)) % 3;
const int xcid = (blockIdx.x * _XCPB_ + ((threadIdx.y) % _XCPB_) + dx - 1 + info.ncells.x) % info.ncells.x;
const int ycid = (blockIdx.y * _YCPB_ + ((threadIdx.y / _XCPB_) % _YCPB_) + dy - 1 + info.ncells.y) % info.ncells.y;
const int zcid = (blockIdx.z * _ZCPB_ + ((threadIdx.y / (_XCPB_ * _YCPB_)) % _ZCPB_) + dz - 1 + info.ncells.z) % info.ncells.z;
const int cid = xcid + info.ncells.x * (ycid + info.ncells.y * zcid);
starts[wid][tid] = tex1Dfetch(texStart, cid);
mycount = tex1Dfetch(texCount, cid);
}
for(int L = 1; L < 32; L <<= 1)
mycount += (tid >= L) * __shfl_up(mycount, L) ;
if (tid < 27)
scan[wid][tid] = mycount;
const int dststart = starts[wid][0];
const int nsrc = scan[wid][26], ndst = scan[wid][0];
for(int d = 0; d < ndst; d += ROWS)
{
const int np1 = min(ndst - d, ROWS);
const int dpid = dststart + d + slot;
const int entry = 3 * dpid;
float2 dtmp0 = tex1Dfetch(texParticles, entry);
float2 dtmp1 = tex1Dfetch(texParticles, entry + 1);
float2 dtmp2 = tex1Dfetch(texParticles, entry + 2);
float f[3] = {0, 0, 0};
for(int s = 0; s < nsrc; s += COLS)
{
const int np2 = min(nsrc - s, COLS);
const int pid = s + subtid;
const int key9 = 9 * (pid >= scan[wid][8]) + 9 * (pid >= scan[wid][17]);
const int key3 = 3 * (pid >= scan[wid][key9 + 2]) + 3 * (pid >= scan[wid][key9 + 5]);
const int key1 = (pid >= scan[wid][key9 + key3]) + (pid >= scan[wid][key9 + key3 + 1]);
const int key = key9 + key3 + key1;
assert(subtid >= np2 || pid >= (key ? scan[wid][key - 1] : 0) && pid < scan[wid][key]);
const int spid = starts[wid][key] + pid - (key ? scan[wid][key - 1] : 0);
const int sentry = 3 * spid;
const float2 stmp0 = tex1Dfetch(texParticles, sentry);
const float2 stmp1 = tex1Dfetch(texParticles, sentry + 1);
const float2 stmp2 = tex1Dfetch(texParticles, sentry + 2);
{
const float xforce = f[0];
const float yforce = f[1];
const float zforce = f[2];
const float xdiff = dtmp0.x - stmp0.x;
const float ydiff = dtmp0.y - stmp0.y;
const float zdiff = dtmp1.x - stmp1.x;
const float _xr = xdiff - info.domainsize.x * floorf(0.5f + xdiff * info.invdomainsize.x);
const float _yr = ydiff - info.domainsize.y * floorf(0.5f + ydiff * info.invdomainsize.y);
const float _zr = zdiff - info.domainsize.z * floorf(0.5f + zdiff * info.invdomainsize.z);
const float rij2 = _xr * _xr + _yr * _yr + _zr * _zr;
const float invrij = rsqrtf(rij2);
const float rij = rij2 * invrij;
const float wr = max((float)0, 1 - rij * info.invrc);
const float xr = _xr * invrij;
const float yr = _yr * invrij;
const float zr = _zr * invrij;
const float rdotv =
xr * (dtmp1.y - stmp1.y) +
yr * (dtmp2.x - stmp2.x) +
zr * (dtmp2.y - stmp2.y);
const float mysaru = saru(min(spid, dpid), max(spid, dpid), idtimestep);
const float myrandnr = 3.464101615f * mysaru - 1.732050807f;
const float strength = (info.aij - info.gamma * wr * rdotv + info.sigmaf * myrandnr) * wr;
const bool valid = (d + slot != s + subtid) && (slot < np1) && (subtid < np2);
if (valid)
{
#ifdef _CHECK_
f[0] = xforce + (rij2 < 1);
f[1] = yforce + wr;
f[2] = zforce + 0;
#else
f[0] = xforce + strength * xr;
f[1] = yforce + strength * yr;
f[2] = zforce + strength * zr;
#endif
}
}
}
for(int L = COLS / 2; L > 0; L >>=1)
for(int c = 0; c < 3; ++c)
f[c] += __shfl_xor(f[c], L);
const float fcontrib = f[subtid % 3];
const int dstpid = dststart + d + slot;
const int c = (subtid % 3);
if (slot < np1)
info.axayaz[c + 3 * dstpid] = fcontrib;
}
}
#include <cmath>
#include <unistd.h>
#include <thrust/device_vector.h>
using namespace thrust;
#include "profiler-dpd.h"
#include "cell-lists.h"
#define CUDA_CHECK(ans) do { cudaAssert((ans), __FILE__, __LINE__); } while(0)
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
sleep(5);
if (abort) exit(code);
}
}
template<typename T> T * _ptr(device_vector<T>& v) { return raw_pointer_cast(v.data()); }
ProfilerDPD * myprof = NULL;
void forces_dpd_cuda(float * const _xyzuvw, float * const _axayaz,
int * const order, const int np,
const float rc,
const float XL, const float YL, const float ZL,
const float aij,
const float gamma,
const float sigma,
const float invsqrtdt)
{
if (myprof == NULL)
myprof = new ProfilerDPD();
int nx = (int)ceil(XL / rc);
int ny = (int)ceil(YL / rc);
int nz = (int)ceil(ZL / rc);
const int ncells = nx * ny * nz;
device_vector<float> xyzuvw(_xyzuvw, _xyzuvw + np * 6), axayaz(np * 3);
InfoDPD c;
c.ncells = make_int3(nx, ny, nz);
c.np = np;
c.domainsize = make_float3(XL, YL, ZL);
c.invdomainsize = make_float3(1 / XL, 1 / YL, 1 / ZL);
c.domainstart = make_float3(-XL * 0.5, -YL * 0.5, -ZL * 0.5);
c.invrc = 1.f / rc;
c.aij = aij;
c.gamma = gamma;
c.sigmaf = sigma * invsqrtdt;
c.xyzuvw = _ptr(xyzuvw);
c.axayaz = _ptr(axayaz);
CUDA_CHECK(cudaMemcpyToSymbol(info, &c, sizeof(c)));
device_vector<int> starts(ncells), ends(ncells);
build_clists(_ptr(xyzuvw), np, rc, c.ncells.x, c.ncells.y, c.ncells.z,
c.domainstart.x, c.domainstart.y, c.domainstart.z,
order, _ptr(starts), _ptr(ends));
{
size_t textureoffset = 0;
cudaChannelFormatDesc fmt = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindSigned);
texStart.channelDesc = fmt;
texStart.filterMode = cudaFilterModePoint;
texStart.mipmapFilterMode = cudaFilterModePoint;
texStart.normalized = 0;
cudaBindTexture(&textureoffset, &texStart, _ptr(starts), &fmt, sizeof(int) * (ncells));
texCount.channelDesc = fmt;
texCount.filterMode = cudaFilterModePoint;
texCount.mipmapFilterMode = cudaFilterModePoint;
texCount.normalized = 0;
cudaBindTexture(&textureoffset, &texCount, _ptr(ends), &fmt, sizeof(int) * (ncells));
fmt = cudaCreateChannelDesc<float2>();
texParticles.channelDesc = fmt;
texParticles.filterMode = cudaFilterModePoint;
texParticles.mipmapFilterMode = cudaFilterModePoint;
texParticles.normalized = 0;
cudaBindTexture(&textureoffset, &texParticles, c.xyzuvw, &fmt, sizeof(float) * 6 * np);
}
myprof->start();
static int tid = 0;
_dpd_forces_saru<<<dim3(c.ncells.x / _XCPB_,
c.ncells.y / _YCPB_,
c.ncells.z / _ZCPB_), dim3(32, CPB)>>>(tid);
++tid;
CUDA_CHECK(cudaPeekAtLastError());
myprof->force();
myprof->report();
copy(xyzuvw.begin(), xyzuvw.end(), _xyzuvw);
copy(axayaz.begin(), axayaz.end(), _axayaz);
#ifdef _CHECK_
CUDA_CHECK(cudaThreadSynchronize());
for(int i = 0; i < np; ++i)
{
printf("pid %d -> %f %f %f\n", i, (float)axayaz[0 + 3 * i], (float)axayaz[1 + 3* i], (float)axayaz[2 + 3 *i]);
int cnt = 0;
float fc = 0;
printf("devi coords are %f %f %f\n", (float)xyzuvw[0 + 6 * i], (float)xyzuvw[1 + 6 * i], (float)xyzuvw[2 + 6 * i]);
printf("host coords are %f %f %f\n", (float)_xyzuvw[0 + 6 * i], (float)_xyzuvw[1 + 6 * i], (float)_xyzuvw[2 + 6 * i]);
for(int j = 0; j < np; ++j)
{
if (i == j)
continue;
float xr = _xyzuvw[0 + 6 *i] - _xyzuvw[0 + 6 * j];
float yr = _xyzuvw[1 + 6 *i] - _xyzuvw[1 + 6 * j];
float zr = _xyzuvw[2 + 6 *i] - _xyzuvw[2 + 6 * j];
xr -= c.domainsize.x * ::floor(0.5f + xr / c.domainsize.x);
yr -= c.domainsize.y * ::floor(0.5f + yr / c.domainsize.y);
zr -= c.domainsize.z * ::floor(0.5f + zr / c.domainsize.z);
const float rij2 = xr * xr + yr * yr + zr * zr;
const float invrij = rsqrtf(rij2);
const float rij = rij2 * invrij;
const float wr = max((float)0, 1 - rij * c.invrc);
const bool collision = rij2 < 1;
if (collision)
fc += wr;// printf("ref p %d colliding with %d\n", i, j);
cnt += collision;
}
printf("i found %d host interactions and with cuda i found %d\n", cnt, (int)axayaz[0 + 3 * i]);
assert(cnt == (float)axayaz[0 + 3 * i]);
printf("fc aij ref %f vs cuda %e\n", fc, (float)axayaz[1 + 3 * i]);
assert(fabs(fc - (float)axayaz[1 + 3 * i]) < 1e-4);
}
printf("test done.\n");
sleep(1);
exit(0);
#endif
}
void forces_dpd_cuda(float * const xp, float * const yp, float * const zp,
float * const xv, float * const yv, float * const zv,
float * const xa, float * const ya, float * const za,
int * const order, const int np,
const float rc,
const float LX, const float LY, const float LZ,
const float aij,
const float gamma,
const float sigma,
const float invsqrtdt)
{
float * pv = new float[6 * np];
for(int i = 0; i < np; ++i)
{
pv[0 + 6 * i] = xp[i];
pv[1 + 6 * i] = yp[i];
pv[2 + 6 * i] = zp[i];
pv[3 + 6 * i] = xv[i];
pv[4 + 6 * i] = yv[i];
pv[5 + 6 * i] = zv[i];
}
float * a = new float[3 * np];
forces_dpd_cuda(pv, a, order, np, rc, LX, LY, LZ,
aij, gamma, sigma, invsqrtdt);
for(int i = 0; i < np; ++i)
{
xp[i] = pv[0 + 6 * i];
yp[i] = pv[1 + 6 * i];
zp[i] = pv[2 + 6 * i];
xv[i] = pv[3 + 6 * i];
yv[i] = pv[4 + 6 * i];
zv[i] = pv[5 + 6 * i];
}
delete [] pv;
for(int i = 0; i < np; ++i)
{
xa[i] = a[0 + 3 * i];
ya[i] = a[1 + 3 * i];
za[i] = a[2 + 3 * i];
}
delete [] a;
} |
a5cde406da654570b2cd4aff12f30d342f39c824.hip | // !!! This is a file automatically generated by hipify!!!
// Cuckoo Cycle, a memory-hard proof-of-work
// Copyright (c) 2013-2015 John Tromp
// The edge-trimming time-memory trade-off is due to Dave Anderson:
// http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html
#include <stdint.h>
#include <string.h>
#include <time.h>
#include "cuckoo.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#if SIZESHIFT <= 32
typedef u32 nonce_t;
typedef u32 node_t;
#else
typedef u64 nonce_t;
typedef u64 node_t;
#endif
#include <openssl/sha.h>
// d(evice s)ipnode
#if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain
static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); }
static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; }
static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) {
asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t"
: "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y));
}
#undef ROTL
__inline__ __device__ uint2 ROTL(const uint2 a, const int offset) {
uint2 result;
if (offset >= 32) {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset));
} else {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
return result;
}
__device__ __forceinline__ uint2 vectorize(const uint64_t x) {
uint2 result;
asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x));
return result;
}
__device__ __forceinline__ uint64_t devectorize(uint2 x) {
uint64_t result;
asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y));
return result;
}
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
uint2 nonce = vectorize(2*nce + uorv);
uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= vectorize(0xff);
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#else
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
u64 nonce = 2*nce + uorv;
u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= 0xff;
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <set>
// algorithm parameters
#ifndef PART_BITS
// #bits used to partition edge set processing to save memory
// a value of 0 does no partitioning and is fastest
// a value of 1 partitions in two, making twice_set the
// same size as shrinkingset at about 33% slowdown
// higher values are not that interesting
#define PART_BITS 0
#endif
#ifndef IDXSHIFT
// we want sizeof(cuckoo_hash) == sizeof(twice_set), so
// CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32)
// CUCKOO_SIZE * 2 == TWICE_WORDS
// (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32
// SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5
// IDXSHIFT == 1 + PART_BITS + 5
#define IDXSHIFT (PART_BITS + 6)
#endif
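// Editor's note: concretely, assuming the stock cuckoo.h definitions SIZE = 1<<SIZESHIFT
// and HALFSIZE = SIZE/2, then with PART_BITS = 0 and an illustrative SIZESHIFT of 28:
// CUCKOO_SIZE = SIZE >> 6 = 2^22 u64 slots = 32 MB, and
// TWICE_WORDS = 2 * HALFSIZE / 32 = 2^23 u32 words = 32 MB,
// so the cuckoo_hash used after trimming fits exactly in the twice_set's footprint.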
// grow with cube root of size, hardly affected by trimming
#define MAXPATHLEN (8 << (SIZESHIFT/3))
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// set that starts out full and gets reset by threads on disjoint words
class shrinkingset {
public:
u32 *bits;
__device__ void reset(nonce_t n) {
bits[n/32] |= 1 << (n%32);
}
__device__ bool test(node_t n) const {
return !((bits[n/32] >> (n%32)) & 1);
}
__device__ u32 block(node_t n) const {
return ~bits[n/32];
}
};
#define PART_MASK ((1 << PART_BITS) - 1)
#define ONCE_BITS (HALFSIZE >> PART_BITS)
#define TWICE_WORDS ((2 * ONCE_BITS) / 32)
class twice_set {
public:
u32 *bits;
__device__ void reset() {
memset(bits, 0, TWICE_WORDS * sizeof(u32));
}
__device__ void set(node_t u) {
node_t idx = u / 16;
u32 bit = 1 << (2 * (u % 16));
u32 old = atomicOr(&bits[idx], bit);
u32 bit2 = bit << 1;
if ((old & (bit2 | bit)) == bit) atomicOr(&bits[idx], bit2);
}
__device__ u32 test(node_t u) const {
return (bits[u/16] >> (2 * (u%16))) & 2;
}
};
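// Editor's note: twice_set keeps two bits per node. set() first ORs in the "seen once"
// bit; only when that bit was already present ((old & (bit2|bit)) == bit) does it OR in
// the adjacent "seen twice" bit, so test() is non-zero exactly for nodes touched by at
// least two live edges -- i.e. nodes that are not leaves in the current edge set.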
#define CUCKOO_SIZE (SIZE >> IDXSHIFT)
#define CUCKOO_MASK (CUCKOO_SIZE - 1)
// number of (least significant) key bits that survives leftshift by SIZESHIFT
#define KEYBITS (64-SIZESHIFT)
#define KEYMASK ((1L << KEYBITS) - 1)
#define MAXDRIFT (1L << (KEYBITS - IDXSHIFT))
class cuckoo_hash {
public:
u64 *cuckoo;
cuckoo_hash() {
cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64));
assert(cuckoo != 0);
}
~cuckoo_hash() {
free(cuckoo);
}
void set(node_t u, node_t v) {
u64 niew = (u64)u << SIZESHIFT | v;
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
u64 old = 0;
if (cuckoo[ui].compare_exchange_strong(old, niew, std::memory_order_relaxed))
return;
if ((old >> SIZESHIFT) == (u & KEYMASK)) {
cuckoo[ui].store(niew, std::memory_order_relaxed);
#else
u64 old = cuckoo[ui];
if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) {
cuckoo[ui] = niew;
#endif
return;
}
}
}
node_t operator[](node_t u) const {
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
u64 cu = cuckoo[ui].load(std::memory_order_relaxed);
#else
u64 cu = cuckoo[ui];
#endif
if (!cu)
return 0;
if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
return (node_t)(cu & (SIZE-1));
}
}
}
};
class cuckoo_ctx {
public:
siphash_ctx sip_ctx;
shrinkingset alive;
twice_set nonleaf;
int nthreads;
cuckoo_ctx(const char* header, u32 n_threads) {
setheader(&sip_ctx, header);
nthreads = n_threads;
}
};
#define TPB 128
__global__ void
__launch_bounds__(TPB, 1)
count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
u32 alive32 = alive.block(block);
for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffs(alive32);
nonce += ffs; alive32 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
nonleaf.set(u >> PART_BITS);
}
}
}
}
__global__ void
__launch_bounds__(TPB, 1)
kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
u32 alive32 = alive.block(block);
for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffs(alive32);
nonce += ffs; alive32 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
if (!nonleaf.test(u >> PART_BITS)) {
alive.reset(nonce);
}
}
}
}
}
u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) {
u32 nu;
for (nu = 0; u; u = cuckoo[u]) {
if (++nu >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
exit(0);
}
us[nu] = u;
}
return nu;
}
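// Editor's note on the cycle search in main() below: cuckoo[] maps each visited node to
// the node it was reached from, so path() just walks those links to the root of u's tree,
// recording the visited nodes in us[]. For every surviving edge (u0,v0) both endpoints
// are walked; if the two walks meet at the same root, adding the edge would close a cycle
// whose length is nu+nv+1 once the common tail is trimmed, otherwise the shorter path is
// reversed and the edge is stored so the forest stays rooted.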
typedef std::pair<node_t,node_t> edge;
#ifndef WIN32
#include <unistd.h>
#else
#include "getopt/getopt.h"
#endif
int main(int argc, char **argv) {
int nthreads = 1;
int ntrims = 1 + (PART_BITS+3)*(PART_BITS+4)/2;
int tpb = 0;
const char *header = "";
bool profiling = false;
int c;
while ((c = getopt (argc, argv, "h:n:t:p:")) != -1) {
switch (c) {
case 'h':
header = optarg;
break;
case 'n':
ntrims = atoi(optarg);
break;
case 't':
nthreads = atoi(optarg);
break;
case 'p':
tpb = atoi(optarg);
break;
}
}
if (!tpb) // if not set, then default threads per block to roughly square root of threads
for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ;
printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d threads %d per block\n",
PROOFSIZE, SIZESHIFT, header, ntrims, nthreads, tpb);
u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32);
cuckoo_ctx ctx(header, nthreads);
checkCudaErrors(hipMalloc((void**)&ctx.alive.bits, edgeBytes));
checkCudaErrors(hipMemset(ctx.alive.bits, 0, edgeBytes));
checkCudaErrors(hipMalloc((void**)&ctx.nonleaf.bits, nodeBytes));
int edgeUnit=0, nodeUnit=0;
u64 eb = edgeBytes, nb = nodeBytes;
for (; eb >= 1024; eb>>=10) edgeUnit++;
for (; nb >= 1024; nb>>=10) nodeUnit++;
printf("Using %d%cB edge and %d%cB node memory.\n",
(int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]);
cuckoo_ctx *device_ctx;
checkCudaErrors(hipMalloc((void**)&device_ctx, sizeof(cuckoo_ctx)));
hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice);
hipEvent_t start, stop;
if (profiling) {
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
hipEventRecord(start, nullptr);
}
for (u32 round=0; round < ntrims; round++) {
for (u32 uorv = 0; uorv < 2; uorv++) {
for (u32 part = 0; part <= PART_MASK; part++) {
checkCudaErrors(hipMemset(ctx.nonleaf.bits, 0, nodeBytes));
hipLaunchKernelGGL(( count_node_deg), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_ctx, uorv, part);
hipLaunchKernelGGL(( kill_leaf_edges), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_ctx, uorv, part);
}
}
}
if (profiling) {
hipEventRecord(stop, nullptr);
hipEventSynchronize(stop);
u64 *bits;
bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64));
assert(bits != 0);
hipMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), hipMemcpyDeviceToHost);
checkCudaErrors(hipFree(ctx.alive.bits));
checkCudaErrors(hipFree(ctx.nonleaf.bits));
u32 cnt = 0;
for (int i = 0; i < HALFSIZE/64; i++)
cnt += __builtin_popcountll(~bits[i]);
u32 load = (u32)(100L * cnt / CUCKOO_SIZE);
printf("final load %d%%\n", load);
if (load >= 90) {
printf("overloaded! exiting...");
exit(0);
}
cuckoo_hash &cuckoo = *(new cuckoo_hash());
node_t us[MAXPATHLEN], vs[MAXPATHLEN];
for (nonce_t block = 0; block < HALFSIZE; block += 64) {
u64 alive64 = ~bits[block/64];
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __builtin_ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u0=sipnode(&ctx.sip_ctx, nonce, 0), v0=sipnode(&ctx.sip_ctx, nonce, 1);
if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
continue;
node_t u = cuckoo[us[0] = u0], v = cuckoo[vs[0] = v0];
u32 nu = path(cuckoo, u, us), nv = path(cuckoo, v, vs);
if (us[nu] == vs[nv]) {
u32 min = nu < nv ? nu : nv;
for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
u32 len = nu + nv + 1;
printf("% 4d-cycle found at %d:%d%%\n", len, 0, (u32)(nonce*100L/HALFSIZE));
if (len == PROOFSIZE) {
printf("Solution");
std::set<edge> cycle;
u32 n = 0;
cycle.insert(edge(*us, *vs));
while (nu--)
cycle.insert(edge(us[(nu+1)&~1], us[nu|1])); // u's in even position; v's in odd
while (nv--)
cycle.insert(edge(vs[nv|1], vs[(nv+1)&~1])); // u's in odd position; v's in even
for (nonce_t blk = 0; blk < HALFSIZE; blk += 64) {
u64 alv64 = ~bits[blk/64];
for (nonce_t nce = blk-1; alv64; ) { // -1 compensates for 1-based ffs
u32 ffs = __builtin_ffsll(alv64);
nce += ffs; alv64 >>= ffs;
edge e(sipnode(&ctx.sip_ctx, nce, 0), sipnode(&ctx.sip_ctx, nce, 1));
if (cycle.find(e) != cycle.end()) {
printf(" %x", nce);
if (PROOFSIZE > 2)
cycle.erase(e);
n++;
}
if (ffs & 64) break; // can't shift by 64
}
}
assert(n==PROOFSIZE);
printf("\n");
}
continue;
}
if (nu < nv) {
while (nu--)
cuckoo.set(us[nu+1], us[nu]);
cuckoo.set(u0, v0);
} else {
while (nv--)
cuckoo.set(vs[nv+1], vs[nv]);
cuckoo.set(v0, u0);
}
if (ffs & 64) break; // can't shift by 64
}
}
return 0;
}
| a5cde406da654570b2cd4aff12f30d342f39c824.cu | // Cuckoo Cycle, a memory-hard proof-of-work
// Copyright (c) 2013-2015 John Tromp
// The edge-trimming time-memory trade-off is due to Dave Anderson:
// http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html
#include <stdint.h>
#include <string.h>
#include <time.h>
#include "cuckoo.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#if SIZESHIFT <= 32
typedef u32 nonce_t;
typedef u32 node_t;
#else
typedef u64 nonce_t;
typedef u64 node_t;
#endif
#include <openssl/sha.h>
// d(evice s)ipnode
#if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain
static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); }
static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; }
static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) {
asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t"
: "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y));
}
#undef ROTL
__inline__ __device__ uint2 ROTL(const uint2 a, const int offset) {
uint2 result;
if (offset >= 32) {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset));
} else {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
return result;
}
__device__ __forceinline__ uint2 vectorize(const uint64_t x) {
uint2 result;
asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x));
return result;
}
__device__ __forceinline__ uint64_t devectorize(uint2 x) {
uint64_t result;
asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y));
return result;
}
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
uint2 nonce = vectorize(2*nce + uorv);
uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= vectorize(0xff);
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#else
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
u64 nonce = 2*nce + uorv;
u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= 0xff;
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <set>
// algorithm parameters
#ifndef PART_BITS
// #bits used to partition edge set processing to save memory
// a value of 0 does no partitioning and is fastest
// a value of 1 partitions in two, making twice_set the
// same size as shrinkingset at about 33% slowdown
// higher values are not that interesting
#define PART_BITS 0
#endif
#ifndef IDXSHIFT
// we want sizeof(cuckoo_hash) == sizeof(twice_set), so
// CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32)
// CUCKOO_SIZE * 2 == TWICE_WORDS
// (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32
// SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5
// IDXSHIFT == 1 + PART_BITS + 5
#define IDXSHIFT (PART_BITS + 6)
#endif
// grow with cube root of size, hardly affected by trimming
#define MAXPATHLEN (8 << (SIZESHIFT/3))
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// set that starts out full and gets reset by threads on disjoint words
class shrinkingset {
public:
u32 *bits;
__device__ void reset(nonce_t n) {
bits[n/32] |= 1 << (n%32);
}
__device__ bool test(node_t n) const {
return !((bits[n/32] >> (n%32)) & 1);
}
__device__ u32 block(node_t n) const {
return ~bits[n/32];
}
};
#define PART_MASK ((1 << PART_BITS) - 1)
#define ONCE_BITS (HALFSIZE >> PART_BITS)
#define TWICE_WORDS ((2 * ONCE_BITS) / 32)
class twice_set {
public:
u32 *bits;
__device__ void reset() {
memset(bits, 0, TWICE_WORDS * sizeof(u32));
}
__device__ void set(node_t u) {
node_t idx = u / 16;
u32 bit = 1 << (2 * (u % 16));
u32 old = atomicOr(&bits[idx], bit);
u32 bit2 = bit << 1;
if ((old & (bit2 | bit)) == bit) atomicOr(&bits[idx], bit2);
}
__device__ u32 test(node_t u) const {
return (bits[u/16] >> (2 * (u%16))) & 2;
}
};
#define CUCKOO_SIZE (SIZE >> IDXSHIFT)
#define CUCKOO_MASK (CUCKOO_SIZE - 1)
// number of (least significant) key bits that survives leftshift by SIZESHIFT
#define KEYBITS (64-SIZESHIFT)
#define KEYMASK ((1L << KEYBITS) - 1)
#define MAXDRIFT (1L << (KEYBITS - IDXSHIFT))
class cuckoo_hash {
public:
u64 *cuckoo;
cuckoo_hash() {
cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64));
assert(cuckoo != 0);
}
~cuckoo_hash() {
free(cuckoo);
}
void set(node_t u, node_t v) {
u64 niew = (u64)u << SIZESHIFT | v;
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
u64 old = 0;
if (cuckoo[ui].compare_exchange_strong(old, niew, std::memory_order_relaxed))
return;
if ((old >> SIZESHIFT) == (u & KEYMASK)) {
cuckoo[ui].store(niew, std::memory_order_relaxed);
#else
u64 old = cuckoo[ui];
if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) {
cuckoo[ui] = niew;
#endif
return;
}
}
}
node_t operator[](node_t u) const {
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
u64 cu = cuckoo[ui].load(std::memory_order_relaxed);
#else
u64 cu = cuckoo[ui];
#endif
if (!cu)
return 0;
if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
return (node_t)(cu & (SIZE-1));
}
}
}
};
class cuckoo_ctx {
public:
siphash_ctx sip_ctx;
shrinkingset alive;
twice_set nonleaf;
int nthreads;
cuckoo_ctx(const char* header, u32 n_threads) {
setheader(&sip_ctx, header);
nthreads = n_threads;
}
};
#define TPB 128
__global__ void
__launch_bounds__(TPB, 1)
count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
u32 alive32 = alive.block(block);
for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffs(alive32);
nonce += ffs; alive32 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
nonleaf.set(u >> PART_BITS);
}
}
}
}
__global__ void
__launch_bounds__(TPB, 1)
kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
u32 alive32 = alive.block(block);
for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffs(alive32);
nonce += ffs; alive32 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
if (!nonleaf.test(u >> PART_BITS)) {
alive.reset(nonce);
}
}
}
}
}
u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) {
u32 nu;
for (nu = 0; u; u = cuckoo[u]) {
if (++nu >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
exit(0);
}
us[nu] = u;
}
return nu;
}
typedef std::pair<node_t,node_t> edge;
#ifndef WIN32
#include <unistd.h>
#else
#include "getopt/getopt.h"
#endif
int main(int argc, char **argv) {
int nthreads = 1;
int ntrims = 1 + (PART_BITS+3)*(PART_BITS+4)/2;
int tpb = 0;
const char *header = "";
bool profiling = false;
int c;
while ((c = getopt (argc, argv, "h:n:t:p:")) != -1) {
switch (c) {
case 'h':
header = optarg;
break;
case 'n':
ntrims = atoi(optarg);
break;
case 't':
nthreads = atoi(optarg);
break;
case 'p':
tpb = atoi(optarg);
break;
}
}
if (!tpb) // if not set, then default threads per block to roughly square root of threads
for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ;
printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d threads %d per block\n",
PROOFSIZE, SIZESHIFT, header, ntrims, nthreads, tpb);
u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32);
cuckoo_ctx ctx(header, nthreads);
checkCudaErrors(cudaMalloc((void**)&ctx.alive.bits, edgeBytes));
checkCudaErrors(cudaMemset(ctx.alive.bits, 0, edgeBytes));
checkCudaErrors(cudaMalloc((void**)&ctx.nonleaf.bits, nodeBytes));
int edgeUnit=0, nodeUnit=0;
u64 eb = edgeBytes, nb = nodeBytes;
for (; eb >= 1024; eb>>=10) edgeUnit++;
for (; nb >= 1024; nb>>=10) nodeUnit++;
printf("Using %d%cB edge and %d%cB node memory.\n",
(int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]);
cuckoo_ctx *device_ctx;
checkCudaErrors(cudaMalloc((void**)&device_ctx, sizeof(cuckoo_ctx)));
cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
if (profiling) {
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
cudaEventRecord(start, nullptr);
}
for (u32 round=0; round < ntrims; round++) {
for (u32 uorv = 0; uorv < 2; uorv++) {
for (u32 part = 0; part <= PART_MASK; part++) {
checkCudaErrors(cudaMemset(ctx.nonleaf.bits, 0, nodeBytes));
count_node_deg<<<nthreads/tpb,tpb >>>(device_ctx, uorv, part);
kill_leaf_edges<<<nthreads/tpb,tpb >>>(device_ctx, uorv, part);
}
}
}
if (profiling) {
cudaEventRecord(stop, nullptr);
cudaEventSynchronize(stop);
u64 *bits;
bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64));
assert(bits != 0);
cudaMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), cudaMemcpyDeviceToHost);
checkCudaErrors(cudaFree(ctx.alive.bits));
checkCudaErrors(cudaFree(ctx.nonleaf.bits));
u32 cnt = 0;
for (int i = 0; i < HALFSIZE/64; i++)
cnt += __builtin_popcountll(~bits[i]);
u32 load = (u32)(100L * cnt / CUCKOO_SIZE);
printf("final load %d%%\n", load);
if (load >= 90) {
printf("overloaded! exiting...");
exit(0);
}
cuckoo_hash &cuckoo = *(new cuckoo_hash());
node_t us[MAXPATHLEN], vs[MAXPATHLEN];
for (nonce_t block = 0; block < HALFSIZE; block += 64) {
u64 alive64 = ~bits[block/64];
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __builtin_ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u0=sipnode(&ctx.sip_ctx, nonce, 0), v0=sipnode(&ctx.sip_ctx, nonce, 1);
if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
continue;
node_t u = cuckoo[us[0] = u0], v = cuckoo[vs[0] = v0];
u32 nu = path(cuckoo, u, us), nv = path(cuckoo, v, vs);
if (us[nu] == vs[nv]) {
u32 min = nu < nv ? nu : nv;
for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
u32 len = nu + nv + 1;
printf("% 4d-cycle found at %d:%d%%\n", len, 0, (u32)(nonce*100L/HALFSIZE));
if (len == PROOFSIZE) {
printf("Solution");
std::set<edge> cycle;
u32 n = 0;
cycle.insert(edge(*us, *vs));
while (nu--)
cycle.insert(edge(us[(nu+1)&~1], us[nu|1])); // u's in even position; v's in odd
while (nv--)
cycle.insert(edge(vs[nv|1], vs[(nv+1)&~1])); // u's in odd position; v's in even
for (nonce_t blk = 0; blk < HALFSIZE; blk += 64) {
u64 alv64 = ~bits[blk/64];
for (nonce_t nce = blk-1; alv64; ) { // -1 compensates for 1-based ffs
u32 ffs = __builtin_ffsll(alv64);
nce += ffs; alv64 >>= ffs;
edge e(sipnode(&ctx.sip_ctx, nce, 0), sipnode(&ctx.sip_ctx, nce, 1));
if (cycle.find(e) != cycle.end()) {
printf(" %x", nce);
if (PROOFSIZE > 2)
cycle.erase(e);
n++;
}
if (ffs & 64) break; // can't shift by 64
}
}
assert(n==PROOFSIZE);
printf("\n");
}
continue;
}
if (nu < nv) {
while (nu--)
cuckoo.set(us[nu+1], us[nu]);
cuckoo.set(u0, v0);
} else {
while (nv--)
cuckoo.set(vs[nv+1], vs[nv]);
cuckoo.set(v0, u0);
}
if (ffs & 64) break; // can't shift by 64
}
}
return 0;
}
|
9c2c1e4ed49a5936c418049ecef1af5a4f101f54.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/strings/sorting.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace cudf {
namespace strings {
namespace detail {
// return sorted version of the given strings column
std::unique_ptr<cudf::column> sort(strings_column_view strings,
sort_type stype,
cudf::order order,
cudf::null_order null_order,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// sort the indices of the strings
size_type num_strings = strings.size();
rmm::device_vector<size_type> indices(num_strings);
thrust::sequence(execpol->on(stream), indices.begin(), indices.end());
thrust::sort(execpol->on(stream),
indices.begin(),
indices.end(),
[d_column, stype, order, null_order] __device__(size_type lhs, size_type rhs) {
bool lhs_null{d_column.is_null(lhs)};
bool rhs_null{d_column.is_null(rhs)};
if (lhs_null || rhs_null)
return (null_order == cudf::null_order::BEFORE ? !rhs_null : !lhs_null);
string_view lhs_str = d_column.element<string_view>(lhs);
string_view rhs_str = d_column.element<string_view>(rhs);
int cmp = 0;
if (stype & sort_type::length) cmp = lhs_str.length() - rhs_str.length();
if (stype & sort_type::name) cmp = lhs_str.compare(rhs_str);
return (order == cudf::order::ASCENDING ? (cmp < 0) : (cmp > 0));
});
// create a column_view as a wrapper of these indices
column_view indices_view(data_type{INT32}, num_strings, indices.data().get(), nullptr, 0);
// now build a new strings column from the indices
auto table_sorted =
cudf::detail::gather(table_view{{strings.parent()}}, indices_view, stream, mr)->release();
return std::move(table_sorted.front());
}
} // namespace detail
} // namespace strings
} // namespace cudf
| 9c2c1e4ed49a5936c418049ecef1af5a4f101f54.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/strings/sorting.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace cudf {
namespace strings {
namespace detail {
// return sorted version of the given strings column
std::unique_ptr<cudf::column> sort(strings_column_view strings,
sort_type stype,
cudf::order order,
cudf::null_order null_order,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// sort the indices of the strings
size_type num_strings = strings.size();
rmm::device_vector<size_type> indices(num_strings);
thrust::sequence(execpol->on(stream), indices.begin(), indices.end());
thrust::sort(execpol->on(stream),
indices.begin(),
indices.end(),
[d_column, stype, order, null_order] __device__(size_type lhs, size_type rhs) {
bool lhs_null{d_column.is_null(lhs)};
bool rhs_null{d_column.is_null(rhs)};
if (lhs_null || rhs_null)
return (null_order == cudf::null_order::BEFORE ? !rhs_null : !lhs_null);
string_view lhs_str = d_column.element<string_view>(lhs);
string_view rhs_str = d_column.element<string_view>(rhs);
int cmp = 0;
if (stype & sort_type::length) cmp = lhs_str.length() - rhs_str.length();
if (stype & sort_type::name) cmp = lhs_str.compare(rhs_str);
return (order == cudf::order::ASCENDING ? (cmp < 0) : (cmp > 0));
});
// create a column_view as a wrapper of these indices
column_view indices_view(data_type{INT32}, num_strings, indices.data().get(), nullptr, 0);
// now build a new strings column from the indices
auto table_sorted =
cudf::detail::gather(table_view{{strings.parent()}}, indices_view, stream, mr)->release();
return std::move(table_sorted.front());
}
} // namespace detail
} // namespace strings
} // namespace cudf
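// A minimal host-side sketch (not part of libcudf) that mirrors the ordering rules
// of the device comparator above: sort_type::length compares lengths, sort_type::name
// compares contents, and when both flags are set the name comparison wins because it
// overwrites `cmp`. std::string::size() stands in for string_view::length(), and the
// function name host_sort_demo is an illustrative assumption.
#include <algorithm>
#include <string>
#include <vector>
inline void host_sort_demo(std::vector<std::string>& rows, bool by_length, bool by_name, bool ascending)
{
  std::sort(rows.begin(), rows.end(), [=](std::string const& lhs, std::string const& rhs) {
    int cmp = 0;
    if (by_length) cmp = static_cast<int>(lhs.size()) - static_cast<int>(rhs.size());
    if (by_name) cmp = lhs.compare(rhs);
    return ascending ? (cmp < 0) : (cmp > 0);
  });
}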
|
7062ca5365a7f6cc1e2d11277f671c82347ebc2b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "cu_errchk.h"
#include "cufft_addredundants.h"
/* This will generate an array that adds the redundant values of a
CUDA R2C transformation. This function was found here:
https://devtalk.nvidia.com/default/topic/488433/cufft-only-gives-non-redundant-results/
*/
template<typename T>
__global__ void k_makeRedundant(T* __restrict__ dst, const T* __restrict__ src, int w, int h)
{
volatile int gid_x = threadIdx.x + blockIdx.x * blockDim.x;
volatile int gid_y = threadIdx.y + blockIdx.y * blockDim.y;
volatile int nbNoRedundants = (w >> 1) + 1;
// index for reading :
volatile int gid = gid_x + nbNoRedundants * gid_y;
T val;
if(gid_x < nbNoRedundants && gid_y < h) {
// write the non redundant part in the new array :
val = src[gid];
gid = gid_x + w * gid_y; // new index for writing
dst[gid] = val;
}
// shift'n'flip
gid_x = w - gid_x;
if(gid_y != 0) {
gid_y = h - gid_y;
}
gid = gid_x + w * gid_y;
// write conjugate :
if(gid_x >= nbNoRedundants && gid_x < w && gid_y >= 0 && gid_y < h) {
val.y = -val.y;
dst[gid] = val; // never coalesced with compute <= 1.1 ; coalesced if >= 1.2 AND w multiple of 16 AND good call configuration
}
}
/* C compatible version that requires a dtype_id to be converted
to the proper data type. */
void cufft_addredundants(void *d_idata,
void *d_odata,
int nx, int ny,
int dtype,
hipStream_t *stream)
{
dim3 blockSize(16,16);
dim3 gridSize((nx-1)/blockSize.x+1,
(ny-1)/blockSize.y+1);
hipStream_t stream_id;
(stream == NULL) ? stream_id = NULL : stream_id = *stream;
switch(dtype) {
case 2:
hipLaunchKernelGGL(( k_makeRedundant), dim3(gridSize), dim3(blockSize), 0, stream_id, static_cast<float2*>(d_odata),
static_cast<const float2*>(d_idata),
nx, ny);
break;
case 3:
hipLaunchKernelGGL(( k_makeRedundant), dim3(gridSize), dim3(blockSize), 0, stream_id, static_cast<double2*>(d_odata),
static_cast<const double2*>(d_idata),
nx, ny);
break;
}
return;
}
| 7062ca5365a7f6cc1e2d11277f671c82347ebc2b.cu | #include <cuda.h>
#include <cufft.h>
#include "cu_errchk.h"
#include "cufft_addredundants.h"
/* This will generate an array that adds the redundant values of a
CUDA R2C transformation. This function was found here:
https://devtalk.nvidia.com/default/topic/488433/cufft-only-gives-non-redundant-results/
*/
template<typename T>
__global__ void k_makeRedundant(T* __restrict__ dst, const T* __restrict__ src, int w, int h)
{
volatile int gid_x = threadIdx.x + blockIdx.x * blockDim.x;
volatile int gid_y = threadIdx.y + blockIdx.y * blockDim.y;
volatile int nbNoRedundants = (w >> 1) + 1;
// index for reading :
volatile int gid = gid_x + nbNoRedundants * gid_y;
T val;
if(gid_x < nbNoRedundants && gid_y < h) {
// write the non redundant part in the new array :
val = src[gid];
gid = gid_x + w * gid_y; // new index for writing
dst[gid] = val;
}
// shift'n'flip
gid_x = w - gid_x;
if(gid_y != 0) {
gid_y = h - gid_y;
}
gid = gid_x + w * gid_y;
// write conjugate :
if(gid_x >= nbNoRedundants && gid_x < w && gid_y >= 0 && gid_y < h) {
val.y = -val.y;
dst[gid] = val; // never coalesced with compute <= 1.1 ; coalesced if >= 1.2 AND w multiple of 16 AND good call configuration
}
}
/* C compatible version that requires a dtype_id to be converted
to the proper data type. */
void cufft_addredundants(void *d_idata,
void *d_odata,
int nx, int ny,
int dtype,
cudaStream_t *stream)
{
dim3 blockSize(16,16);
dim3 gridSize((nx-1)/blockSize.x+1,
(ny-1)/blockSize.y+1);
cudaStream_t stream_id;
(stream == NULL) ? stream_id = NULL : stream_id = *stream;
switch(dtype) {
case 2:
k_makeRedundant<<<gridSize, blockSize, 0, stream_id>>>(static_cast<float2*>(d_odata),
static_cast<const float2*>(d_idata),
nx, ny);
break;
case 3:
k_makeRedundant<<<gridSize, blockSize, 0, stream_id>>>(static_cast<double2*>(d_odata),
static_cast<const double2*>(d_idata),
nx, ny);
break;
}
return;
}
|
3c218534b6cd75a902220b70e3968072bd503444.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/chanwise/fwd.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "src/cuda/conv_bias/chanwise/kern.cuh"
#include "src/cuda/conv_bias/chanwise/kern_helper.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace conv_bias;
using namespace chanwise;
namespace {
// grid idx is (inp_chl, worker_index)
// each y-slice of a block works on an (N, CHL_MUL, OH, OW) spatial image at
// given inp_chl
template <typename T, int CHL_MUL_SET, int FH_SET, int FW_SET, int SW_SET>
__global__ void kern_fwd_float(T* dst, const T* src, const T* flt_tot, Param param) {
extern __shared__ uint8_t flt_storage[];
T* const flt = reinterpret_cast<T*>(flt_storage);
const uint32_t N = param.batch, IC = param.src_chl, ic = blockIdx.x,
IH = param.src_h, IW = param.src_w,
CHL_MUL = CHL_MUL_SET ? CHL_MUL_SET : param.chl_mul,
FH = FH_SET ? FH_SET : param.flt_h,
FW = FW_SET ? FW_SET : param.flt_w, FSIZE = FH * FW,
PH = param.pad_h, PW = param.pad_w, SH = param.stride_h,
SW = param.stride_w, OH = param.out_h, OW = param.out_w,
TOT_OUT = N * CHL_MUL * OH * OW;
block_memcpy(flt, flt_tot + ic * FSIZE * CHL_MUL, FSIZE * CHL_MUL);
uint32_t out_idx_ = blockIdx.y * blockDim.x + threadIdx.x,
nr_out_per_launch = blockDim.x * gridDim.y;
for (; out_idx_ < TOT_OUT; out_idx_ += nr_out_per_launch) {
uint32_t out_idx = out_idx_, n, chl_mul, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int ih = int(oh * SH) - int(PH), iw = int(ow * SW) - int(PW);
const T* flt_base = flt + chl_mul * FSIZE;
const T* src_base = src + int(((n * IC + ic) * IH + ih) * IW + iw);
T sum(0);
if (FH_SET && FW_SET) {
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
// fh + ih < 0 would overflow, so we do not need to check it
if (static_cast<uint32_t>(fh + ih) < IH) {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
if (static_cast<uint32_t>(fw + iw) < IW) {
sum += flt_base[fh * FW + fw] * src_base[fh * IW + fw];
}
}
}
}
} else {
int fhmax = min(int(FH), int(IH - ih)), fwmax = min(int(FW), int(IW - iw));
for (int fh = max(0, -ih); fh < fhmax; ++fh) {
for (int fw = max(0, -iw); fw < fwmax; ++fw) {
sum += flt_base[fh * FW + fw] * src_base[fh * IW + fw];
}
}
}
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow] = sum;
}
}
#if TORCH_HIP_VERSION >= 9000
template <typename T, int CHL_MUL_SET, int FH_SET, int FW_SET, int SW_SET>
__global__ void kern_fwd_half(
__half* dst, const __half* src, const __half* flt_tot, Param param) {
extern __shared__ uint8_t flt_storage[];
__half* const flt = reinterpret_cast<__half*>(flt_storage);
const uint32_t N = param.batch, IC = param.src_chl, ic = blockIdx.x,
IH = param.src_h, IW = param.src_w,
CHL_MUL = CHL_MUL_SET ? CHL_MUL_SET : param.chl_mul,
FH = FH_SET ? FH_SET : param.flt_h,
FW = FW_SET ? FW_SET : param.flt_w, FSIZE = FH * FW,
PH = param.pad_h, PW = param.pad_w, SH = param.stride_h,
SW = param.stride_w, OH = param.out_h, OW = param.out_w,
TOT_OUT = N * CHL_MUL * OH * OW;
block_memcpy(flt, flt_tot + ic * FSIZE * CHL_MUL, FSIZE * CHL_MUL);
uint32_t out_idx_ = (blockIdx.y * blockDim.x + threadIdx.x) * 2,
nr_out_per_launch = (blockDim.x * gridDim.y) * 2;
for (; out_idx_ < TOT_OUT; out_idx_ += nr_out_per_launch) {
if (out_idx_ % OW < OW - 1) {
uint32_t out_idx = out_idx_, n, chl_mul, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int ih = int(oh * SH) - int(PH), iw = int(ow * SW) - int(PW);
const __half* flt_base = flt + chl_mul * FSIZE;
const __half* src_base = src + int(((n * IC + ic) * IH + ih) * IW + iw);
__half2 sum{0.0, 0.0};
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
// fh + ih < 0 would overflow, so we do not need to
// check it
if (static_cast<uint32_t>(fh + ih) < IH) {
if (FH_SET == 3 && FW_SET == 3 && SW_SET == 1) {
__half2 fil0 = {flt_base[fh * FW], flt_base[fh * FW]};
__half2 fil1 = {flt_base[fh * FW + 1], flt_base[fh * FW + 1]};
__half2 fil2 = {flt_base[fh * FW + 2], flt_base[fh * FW + 2]};
__half2 src0 = {0.0, 0.0};
if (static_cast<uint32_t>(iw) < IW)
src0.x = src_base[fh * IW];
if (static_cast<uint32_t>(iw + 1) < IW)
src0.y = src_base[fh * IW + 1];
sum = fma2(src0, fil0, sum);
__half2 src2 = {0.0, 0.0};
if (static_cast<uint32_t>(iw + 2) < IW)
src2.x = src_base[fh * IW + 2];
if (static_cast<uint32_t>(iw + 3) < IW)
src2.y = src_base[fh * IW + 3];
sum = fma2(src2, fil2, sum);
__half2 src1 = {src0.y, src2.x};
sum = fma2(src1, fil1, sum);
} else if (FH_SET == 5 && FW_SET == 5 && SW_SET == 1) {
__half2 fil0 = {flt_base[fh * FW], flt_base[fh * FW]};
__half2 fil1 = {flt_base[fh * FW + 1], flt_base[fh * FW + 1]};
__half2 fil2 = {flt_base[fh * FW + 2], flt_base[fh * FW + 2]};
__half2 fil3 = {flt_base[fh * FW + 3], flt_base[fh * FW + 3]};
__half2 fil4 = {flt_base[fh * FW + 4], flt_base[fh * FW + 4]};
__half2 src0 = {0.0, 0.0};
if (static_cast<uint32_t>(iw) < IW)
src0.x = src_base[fh * IW];
if (static_cast<uint32_t>(iw + 1) < IW)
src0.y = src_base[fh * IW + 1];
sum = fma2(src0, fil0, sum);
__half2 src2 = {0.0, 0.0};
if (static_cast<uint32_t>(iw + 2) < IW)
src2.x = src_base[fh * IW + 2];
if (static_cast<uint32_t>(iw + 3) < IW)
src2.y = src_base[fh * IW + 3];
sum = fma2(src2, fil2, sum);
__half2 src1 = {src0.y, src2.x};
sum = fma2(src1, fil1, sum);
__half2 src4 = {0.0, 0.0};
if (static_cast<uint32_t>(iw + 4) < IW)
src4.x = src_base[fh * IW + 4];
if (static_cast<uint32_t>(iw + 5) < IW)
src4.y = src_base[fh * IW + 5];
sum = fma2(src4, fil4, sum);
__half2 src3 = {src2.y, src4.x};
sum = fma2(src3, fil3, sum);
} else {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
__half2 fil = {
flt_base[fh * FW + fw], flt_base[fh * FW + fw]};
__half2 src = {0.0, 0.0};
if (static_cast<uint32_t>(static_cast<int>(fw) + iw) < IW)
src.x = src_base[fh * IW + fw];
if (static_cast<uint32_t>(static_cast<int>(fw) + iw + SW) <
IW)
src.y = src_base[fh * IW + fw + SW];
sum = fma2(src, fil, sum);
}
}
}
}
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow] = sum.x;
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow + 1] = sum.y;
continue;
}
        // two discontinuous outputs
for (size_t offset = 0; offset < 2; ++offset) {
uint32_t out_idx = out_idx_ + offset, n, chl_mul, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int ih = int(oh * SH) - int(PH), iw = int(ow * SW) - int(PW);
const __half* flt_base = flt + chl_mul * FSIZE;
const __half* src_base = src + int(((n * IC + ic) * IH + ih) * IW + iw);
__half sum(0);
if (FH_SET && FW_SET) {
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
// fh + ih < 0 would overflow, so we do not need to
// check it
if (static_cast<uint32_t>(fh + ih) < IH) {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
if (static_cast<uint32_t>(fw + iw) < IW) {
sum =
fma(flt_base[fh * FW + fw],
src_base[fh * IW + fw], sum);
}
}
}
}
} else {
int fhmax = min(int(FH), int(IH - ih)),
fwmax = min(int(FW), int(IW - iw));
for (int fh = max(0, -ih); fh < fhmax; ++fh) {
for (int fw = max(0, -iw); fw < fwmax; ++fw) {
sum = fma(flt_base[fh * FW + fw], src_base[fh * IW + fw], sum);
}
}
}
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow] = sum;
if (n == N - 1 && chl_mul == CHL_MUL - 1 && ow == OW - 1 && oh == OH - 1)
break;
}
}
}
#endif
#define SET_SW(func, type, sw) \
if (param.flt_h == 2 && param.flt_w == 2) { \
f_struct.f = func<type, 1, 2, 2, sw>; \
} else if (param.flt_h == 3 && param.flt_w == 3) { \
f_struct.f = func<type, 1, 3, 3, sw>; \
} else if (param.flt_h == 5 && param.flt_w == 5) { \
f_struct.f = func<type, 1, 5, 5, sw>; \
} else if (param.flt_h == 7 && param.flt_w == 7) { \
f_struct.f = func<type, 1, 7, 7, sw>; \
} else { \
f_struct.f = func<type, 1, 0, 0, sw>; \
}
#define GET_KERN(func, type) \
FixFunction<type> f_struct; \
if (param.chl_mul == 1) { \
if (param.stride_w == 1) { \
SET_SW(func, type, 1) \
} else { \
SET_SW(func, type, 0) \
} \
} else { \
f_struct.f = func<type, 0, 0, 0, 0>; \
} \
return f_struct;
template <typename T>
struct FixFunction {
void (*f)(T*, const T*, const T*, Param);
};
template <typename T>
FixFunction<T> get_kern(const Param& param);
template <>
FixFunction<float> get_kern<float>(const Param& param) {
GET_KERN(kern_fwd_float, float);
}
#if TORCH_HIP_VERSION >= 9000
template <>
FixFunction<__half> get_kern<__half>(const Param& param) {
GET_KERN(kern_fwd_half, __half);
}
#endif
template <>
FixFunction<dt_float16> get_kern<dt_float16>(const Param& param) {
GET_KERN(kern_fwd_float, dt_float16);
}
#undef SET_SW
#undef GET_KERN
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace conv_bias {
namespace chanwise {
template <typename T>
void run_fwd(
T* dst, const T* src, const T* flt, const Param& param, hipStream_t stream) {
void (*kern)(T*, const T*, const T*, Param);
kern = get_kern<T>(param).f;
int nr_thread = query_blocksize_for_kernel(kern),
nr_out_dimx = param.out_h * param.out_w * param.batch * param.chl_mul;
dim3 nr_block(param.src_chl, ::min(512, max(nr_out_dimx / (nr_thread * 4), 1)));
uint32_t shared = param.chl_mul * param.flt_h * param.flt_w * sizeof(T);
hipLaunchKernelGGL(( kern), dim3(nr_block), dim3(nr_thread), shared, stream, dst, src, flt, param);
after_kernel_launch();
}
template void run_fwd(float*, const float*, const float*, const Param&, hipStream_t);
#if TORCH_HIP_VERSION >= 9000
template void run_fwd(
__half*, const __half*, const __half*, const Param&, hipStream_t);
#endif
template void run_fwd(
dt_float16*, const dt_float16*, const dt_float16*, const Param&, hipStream_t);
} // namespace chanwise
} // namespace conv_bias
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| 3c218534b6cd75a902220b70e3968072bd503444.cu | /**
* \file dnn/src/cuda/conv_bias/chanwise/fwd.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "cuda.h"
#include "cuda_fp16.h"
#include "src/cuda/conv_bias/chanwise/kern.cuh"
#include "src/cuda/conv_bias/chanwise/kern_helper.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace conv_bias;
using namespace chanwise;
namespace {
// grid idx is (inp_chl, worker_index)
// each y-slice of a block works on an (N, CHL_MUL, OH, OW) spatial image at
// given inp_chl
template <typename T, int CHL_MUL_SET, int FH_SET, int FW_SET, int SW_SET>
__global__ void kern_fwd_float(T* dst, const T* src, const T* flt_tot, Param param) {
extern __shared__ uint8_t flt_storage[];
T* const flt = reinterpret_cast<T*>(flt_storage);
const uint32_t N = param.batch, IC = param.src_chl, ic = blockIdx.x,
IH = param.src_h, IW = param.src_w,
CHL_MUL = CHL_MUL_SET ? CHL_MUL_SET : param.chl_mul,
FH = FH_SET ? FH_SET : param.flt_h,
FW = FW_SET ? FW_SET : param.flt_w, FSIZE = FH * FW,
PH = param.pad_h, PW = param.pad_w, SH = param.stride_h,
SW = param.stride_w, OH = param.out_h, OW = param.out_w,
TOT_OUT = N * CHL_MUL * OH * OW;
block_memcpy(flt, flt_tot + ic * FSIZE * CHL_MUL, FSIZE * CHL_MUL);
uint32_t out_idx_ = blockIdx.y * blockDim.x + threadIdx.x,
nr_out_per_launch = blockDim.x * gridDim.y;
for (; out_idx_ < TOT_OUT; out_idx_ += nr_out_per_launch) {
uint32_t out_idx = out_idx_, n, chl_mul, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int ih = int(oh * SH) - int(PH), iw = int(ow * SW) - int(PW);
const T* flt_base = flt + chl_mul * FSIZE;
const T* src_base = src + int(((n * IC + ic) * IH + ih) * IW + iw);
T sum(0);
if (FH_SET && FW_SET) {
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
// fh + ih < 0 would overflow, so we do not need to check it
if (static_cast<uint32_t>(fh + ih) < IH) {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
if (static_cast<uint32_t>(fw + iw) < IW) {
sum += flt_base[fh * FW + fw] * src_base[fh * IW + fw];
}
}
}
}
} else {
int fhmax = min(int(FH), int(IH - ih)), fwmax = min(int(FW), int(IW - iw));
for (int fh = max(0, -ih); fh < fhmax; ++fh) {
for (int fw = max(0, -iw); fw < fwmax; ++fw) {
sum += flt_base[fh * FW + fw] * src_base[fh * IW + fw];
}
}
}
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow] = sum;
}
}
#if CUDA_VERSION >= 9000
template <typename T, int CHL_MUL_SET, int FH_SET, int FW_SET, int SW_SET>
__global__ void kern_fwd_half(
__half* dst, const __half* src, const __half* flt_tot, Param param) {
extern __shared__ uint8_t flt_storage[];
__half* const flt = reinterpret_cast<__half*>(flt_storage);
const uint32_t N = param.batch, IC = param.src_chl, ic = blockIdx.x,
IH = param.src_h, IW = param.src_w,
CHL_MUL = CHL_MUL_SET ? CHL_MUL_SET : param.chl_mul,
FH = FH_SET ? FH_SET : param.flt_h,
FW = FW_SET ? FW_SET : param.flt_w, FSIZE = FH * FW,
PH = param.pad_h, PW = param.pad_w, SH = param.stride_h,
SW = param.stride_w, OH = param.out_h, OW = param.out_w,
TOT_OUT = N * CHL_MUL * OH * OW;
block_memcpy(flt, flt_tot + ic * FSIZE * CHL_MUL, FSIZE * CHL_MUL);
uint32_t out_idx_ = (blockIdx.y * blockDim.x + threadIdx.x) * 2,
nr_out_per_launch = (blockDim.x * gridDim.y) * 2;
for (; out_idx_ < TOT_OUT; out_idx_ += nr_out_per_launch) {
if (out_idx_ % OW < OW - 1) {
uint32_t out_idx = out_idx_, n, chl_mul, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int ih = int(oh * SH) - int(PH), iw = int(ow * SW) - int(PW);
const __half* flt_base = flt + chl_mul * FSIZE;
const __half* src_base = src + int(((n * IC + ic) * IH + ih) * IW + iw);
__half2 sum{0.0, 0.0};
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
// fh + ih < 0 would overflow, so we do not need to
// check it
if (static_cast<uint32_t>(fh + ih) < IH) {
if (FH_SET == 3 && FW_SET == 3 && SW_SET == 1) {
__half2 fil0 = {flt_base[fh * FW], flt_base[fh * FW]};
__half2 fil1 = {flt_base[fh * FW + 1], flt_base[fh * FW + 1]};
__half2 fil2 = {flt_base[fh * FW + 2], flt_base[fh * FW + 2]};
__half2 src0 = {0.0, 0.0};
if (static_cast<uint32_t>(iw) < IW)
src0.x = src_base[fh * IW];
if (static_cast<uint32_t>(iw + 1) < IW)
src0.y = src_base[fh * IW + 1];
sum = fma2(src0, fil0, sum);
__half2 src2 = {0.0, 0.0};
if (static_cast<uint32_t>(iw + 2) < IW)
src2.x = src_base[fh * IW + 2];
if (static_cast<uint32_t>(iw + 3) < IW)
src2.y = src_base[fh * IW + 3];
sum = fma2(src2, fil2, sum);
__half2 src1 = {src0.y, src2.x};
sum = fma2(src1, fil1, sum);
} else if (FH_SET == 5 && FW_SET == 5 && SW_SET == 1) {
__half2 fil0 = {flt_base[fh * FW], flt_base[fh * FW]};
__half2 fil1 = {flt_base[fh * FW + 1], flt_base[fh * FW + 1]};
__half2 fil2 = {flt_base[fh * FW + 2], flt_base[fh * FW + 2]};
__half2 fil3 = {flt_base[fh * FW + 3], flt_base[fh * FW + 3]};
__half2 fil4 = {flt_base[fh * FW + 4], flt_base[fh * FW + 4]};
__half2 src0 = {0.0, 0.0};
if (static_cast<uint32_t>(iw) < IW)
src0.x = src_base[fh * IW];
if (static_cast<uint32_t>(iw + 1) < IW)
src0.y = src_base[fh * IW + 1];
sum = fma2(src0, fil0, sum);
__half2 src2 = {0.0, 0.0};
if (static_cast<uint32_t>(iw + 2) < IW)
src2.x = src_base[fh * IW + 2];
if (static_cast<uint32_t>(iw + 3) < IW)
src2.y = src_base[fh * IW + 3];
sum = fma2(src2, fil2, sum);
__half2 src1 = {src0.y, src2.x};
sum = fma2(src1, fil1, sum);
__half2 src4 = {0.0, 0.0};
if (static_cast<uint32_t>(iw + 4) < IW)
src4.x = src_base[fh * IW + 4];
if (static_cast<uint32_t>(iw + 5) < IW)
src4.y = src_base[fh * IW + 5];
sum = fma2(src4, fil4, sum);
__half2 src3 = {src2.y, src4.x};
sum = fma2(src3, fil3, sum);
} else {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
__half2 fil = {
flt_base[fh * FW + fw], flt_base[fh * FW + fw]};
__half2 src = {0.0, 0.0};
if (static_cast<uint32_t>(static_cast<int>(fw) + iw) < IW)
src.x = src_base[fh * IW + fw];
if (static_cast<uint32_t>(static_cast<int>(fw) + iw + SW) <
IW)
src.y = src_base[fh * IW + fw + SW];
sum = fma2(src, fil, sum);
}
}
}
}
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow] = sum.x;
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow + 1] = sum.y;
continue;
}
        // two discontinuous outputs
for (size_t offset = 0; offset < 2; ++offset) {
uint32_t out_idx = out_idx_ + offset, n, chl_mul, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int ih = int(oh * SH) - int(PH), iw = int(ow * SW) - int(PW);
const __half* flt_base = flt + chl_mul * FSIZE;
const __half* src_base = src + int(((n * IC + ic) * IH + ih) * IW + iw);
__half sum(0);
if (FH_SET && FW_SET) {
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
// fh + ih < 0 would overflow, so we do not need to
// check it
if (static_cast<uint32_t>(fh + ih) < IH) {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
if (static_cast<uint32_t>(fw + iw) < IW) {
sum =
fma(flt_base[fh * FW + fw],
src_base[fh * IW + fw], sum);
}
}
}
}
} else {
int fhmax = min(int(FH), int(IH - ih)),
fwmax = min(int(FW), int(IW - iw));
for (int fh = max(0, -ih); fh < fhmax; ++fh) {
for (int fw = max(0, -iw); fw < fwmax; ++fw) {
sum = fma(flt_base[fh * FW + fw], src_base[fh * IW + fw], sum);
}
}
}
dst[(((n * IC + ic) * CHL_MUL + chl_mul) * OH + oh) * OW + ow] = sum;
if (n == N - 1 && chl_mul == CHL_MUL - 1 && ow == OW - 1 && oh == OH - 1)
break;
}
}
}
#endif
#define SET_SW(func, type, sw) \
if (param.flt_h == 2 && param.flt_w == 2) { \
f_struct.f = func<type, 1, 2, 2, sw>; \
} else if (param.flt_h == 3 && param.flt_w == 3) { \
f_struct.f = func<type, 1, 3, 3, sw>; \
} else if (param.flt_h == 5 && param.flt_w == 5) { \
f_struct.f = func<type, 1, 5, 5, sw>; \
} else if (param.flt_h == 7 && param.flt_w == 7) { \
f_struct.f = func<type, 1, 7, 7, sw>; \
} else { \
f_struct.f = func<type, 1, 0, 0, sw>; \
}
#define GET_KERN(func, type) \
FixFunction<type> f_struct; \
if (param.chl_mul == 1) { \
if (param.stride_w == 1) { \
SET_SW(func, type, 1) \
} else { \
SET_SW(func, type, 0) \
} \
} else { \
f_struct.f = func<type, 0, 0, 0, 0>; \
} \
return f_struct;
template <typename T>
struct FixFunction {
void (*f)(T*, const T*, const T*, Param);
};
template <typename T>
FixFunction<T> get_kern(const Param& param);
template <>
FixFunction<float> get_kern<float>(const Param& param) {
GET_KERN(kern_fwd_float, float);
}
#if CUDA_VERSION >= 9000
template <>
FixFunction<__half> get_kern<__half>(const Param& param) {
GET_KERN(kern_fwd_half, __half);
}
#endif
template <>
FixFunction<dt_float16> get_kern<dt_float16>(const Param& param) {
GET_KERN(kern_fwd_float, dt_float16);
}
#undef SET_SW
#undef GET_KERN
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace conv_bias {
namespace chanwise {
template <typename T>
void run_fwd(
T* dst, const T* src, const T* flt, const Param& param, cudaStream_t stream) {
void (*kern)(T*, const T*, const T*, Param);
kern = get_kern<T>(param).f;
int nr_thread = query_blocksize_for_kernel(kern),
nr_out_dimx = param.out_h * param.out_w * param.batch * param.chl_mul;
dim3 nr_block(param.src_chl, std::min(512, max(nr_out_dimx / (nr_thread * 4), 1)));
uint32_t shared = param.chl_mul * param.flt_h * param.flt_w * sizeof(T);
kern<<<nr_block, nr_thread, shared, stream>>>(dst, src, flt, param);
after_kernel_launch();
}
template void run_fwd(float*, const float*, const float*, const Param&, cudaStream_t);
#if CUDA_VERSION >= 9000
template void run_fwd(
__half*, const __half*, const __half*, const Param&, cudaStream_t);
#endif
template void run_fwd(
dt_float16*, const dt_float16*, const dt_float16*, const Param&, cudaStream_t);
} // namespace chanwise
} // namespace conv_bias
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
25cbd062c83b9e96a2e04d355257284fa589c832.hip | // !!! This is a file automatically generated by hipify!!!
// Global memory
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 16
int main(int argc, char** argv) {
    // declarations
float *hst_A, *hst_B;
float *dev_A, *dev_B;
    // allocation on the host
hst_A = (float*)malloc(N * sizeof(float));
hst_B = (float*)malloc(N * sizeof(float));
    // allocation on the device
hipMalloc((void**)&dev_A, N * sizeof(float));
hipMalloc((void**)&dev_B, N * sizeof(float));
    // initialization
for (int i=0; i<N; i++) {
hst_A[i] = (float)rand() / RAND_MAX;
hst_B[i] = 0;
}
    // data copies
hipMemcpy(dev_A, hst_A, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_B, dev_A, N*sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(hst_B, dev_B, N*sizeof(float), hipMemcpyDeviceToHost);
    // display results
printf("ENTRADA (hst_A):\n");
for (int i=0; i<N; i++) {
printf("%.2f ", hst_A[i]);
}
printf("\n");
printf("SALIDA (hst_B):\n");
for (int i=0; i<N; i++) {
printf("%.2f ", hst_B[i]);
}
printf("\n");
    // release resources
hipFree(dev_A);
hipFree(dev_B);
return 0;
}
 | 25cbd062c83b9e96a2e04d355257284fa589c832.cu | // Global memory
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 16
int main(int argc, char** argv) {
    // declarations
float *hst_A, *hst_B;
float *dev_A, *dev_B;
    // allocation on the host
hst_A = (float*)malloc(N * sizeof(float));
hst_B = (float*)malloc(N * sizeof(float));
    // allocation on the device
cudaMalloc((void**)&dev_A, N * sizeof(float));
cudaMalloc((void**)&dev_B, N * sizeof(float));
    // initialization
for (int i=0; i<N; i++) {
hst_A[i] = (float)rand() / RAND_MAX;
hst_B[i] = 0;
}
    // data copies
cudaMemcpy(dev_A, hst_A, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, dev_A, N*sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(hst_B, dev_B, N*sizeof(float), cudaMemcpyDeviceToHost);
    // display results
printf("ENTRADA (hst_A):\n");
for (int i=0; i<N; i++) {
printf("%.2f ", hst_A[i]);
}
printf("\n");
printf("SALIDA (hst_B):\n");
for (int i=0; i<N; i++) {
printf("%.2f ", hst_B[i]);
}
printf("\n");
    // release resources
cudaFree(dev_A);
cudaFree(dev_B);
return 0;
}
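// Hedged addition (not part of the original exercise): the cudaMalloc/cudaMemcpy calls
// above discard their return codes. A small helper such as this one could surface
// failures; the name check_cuda is an illustrative choice.
static void check_cuda(cudaError_t err, const char* what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Example: check_cuda(cudaMemcpy(dev_A, hst_A, N*sizeof(float), cudaMemcpyHostToDevice), "H2D copy");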
|
0beebb09025f8c0a0f830f3869c99586c7d1adff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include<cstdlib>
#include "tools.h"
#include "timer.h"
#include "reorder.h"
#include "triangle.h"
#include "io.h"
int main(int argc, char *argv[]){
if(argc != 6){
fprintf(stderr, "usage: g_list <algorithm> <input_path> <node_num> <thread_per_block> <block_num>\n");
return 0;
}
int algo = getAlgo(argv[1]);
if(algo == -1){
fprintf(stderr, "algorithm should be forward or edge\n");
return 0;
}
int nodeNum = atoi(argv[3]);
int threadNum = atoi(argv[4]);
int blockNum = atoi(argv[5]);
vector< Node > node(nodeNum);
vector< Edge > edge;
inputList(argv[2], edge);
int edgeNum = (int)edge.size();
int *d_triNum, *d_offset, *d_edgeV;
hipSetDevice(1);
hipMalloc((void**)&d_triNum, sizeof(int));
hipMalloc((void**)&d_offset, sizeof(int)*(nodeNum+1));
hipMalloc((void**)&d_edgeV, sizeof(int)*edgeNum);
timerInit(1)
timerStart(0)
int maxDeg = reorder(algo, node, edge);
initDeviceTriNum(d_triNum);
listCopyToDevice(node, edgeNum, d_offset, d_edgeV);
int smSize = (threadNum+maxDeg) * sizeof(int);
hipLaunchKernelGGL(( gpuCountTriNum), dim3(blockNum), dim3(threadNum), smSize , 0, d_offset, d_edgeV, d_triNum, nodeNum);
hipDeviceSynchronize();
int triNum;
hipMemcpy(&triNum, d_triNum, sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
timerEnd("total", 0)
hipFree(d_triNum);
hipFree(d_offset);
hipFree(d_edgeV);
printf("total triangle: %d\n", triNum);
return 0;
}
| 0beebb09025f8c0a0f830f3869c99586c7d1adff.cu | #include<cstdio>
#include<cstdlib>
#include "tools.h"
#include "timer.h"
#include "reorder.h"
#include "triangle.h"
#include "io.h"
int main(int argc, char *argv[]){
if(argc != 6){
fprintf(stderr, "usage: g_list <algorithm> <input_path> <node_num> <thread_per_block> <block_num>\n");
return 0;
}
int algo = getAlgo(argv[1]);
if(algo == -1){
fprintf(stderr, "algorithm should be forward or edge\n");
return 0;
}
int nodeNum = atoi(argv[3]);
int threadNum = atoi(argv[4]);
int blockNum = atoi(argv[5]);
vector< Node > node(nodeNum);
vector< Edge > edge;
inputList(argv[2], edge);
int edgeNum = (int)edge.size();
int *d_triNum, *d_offset, *d_edgeV;
cudaSetDevice(1);
cudaMalloc((void**)&d_triNum, sizeof(int));
cudaMalloc((void**)&d_offset, sizeof(int)*(nodeNum+1));
cudaMalloc((void**)&d_edgeV, sizeof(int)*edgeNum);
timerInit(1)
timerStart(0)
int maxDeg = reorder(algo, node, edge);
initDeviceTriNum(d_triNum);
listCopyToDevice(node, edgeNum, d_offset, d_edgeV);
int smSize = (threadNum+maxDeg) * sizeof(int);
gpuCountTriNum<<< blockNum, threadNum, smSize >>>(d_offset, d_edgeV, d_triNum, nodeNum);
cudaDeviceSynchronize();
int triNum;
cudaMemcpy(&triNum, d_triNum, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
timerEnd("total", 0)
cudaFree(d_triNum);
cudaFree(d_offset);
cudaFree(d_edgeV);
printf("total triangle: %d\n", triNum);
return 0;
}
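// Hedged addition (not part of the original program): smSize grows with the maximum
// degree of the graph, so on dense graphs it can exceed the per-block shared-memory
// limit and the unchecked kernel launch above would fail. A guard like this, called
// before gpuCountTriNum, would catch that early; the helper name sharedMemFits is an
// illustrative assumption.
static bool sharedMemFits(int device, size_t smBytes){
    cudaDeviceProp prop;
    if(cudaGetDeviceProperties(&prop, device) != cudaSuccess) return false;
    return smBytes <= prop.sharedMemPerBlock;
}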
|
abb88caae3c1f4094290baa1573d0ac5f25dae38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include <stdio.h>
// #include <rocm_smi/rocm_smi.h>
#include "nvmlPower.hpp"
#include "nvmlPower.cpp"
//run command:
//nvcc additionFP32.cu -L/usr/lib64/nvidia -lnvidia-ml -lpthread -I/usr/local/cuda-7.0/samples/common/inc/ -I/nvmlPower.cpp
/*********************************************
******************Pair #1*********************
**********************************************/
/* Ideas:
keep input small-ish
loop
don't read much data
difference should only be additions, nothing else
*/
__global__
void addition1_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
}
}
__global__
void addition2_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
//6 more additions?:
//no extra lookups
//l-1, l-2 already calculated, so no subtraction needed
//no new stores after optimization:
// x[l] store will only happen once, not 3 times.
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
}
}
/*********************************************
******************Pair #2*********************
**********************************************/
/*
Loop:
x[i] = y[i]
---vs.---
loop2:
x[i] = + y[i] + y[i] + y[i]
*/
__global__
void addition3_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = y[l];
}
}
__global__
void addition4_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = y[l] + y[l] + y[l];
}
}
/*********************************************
******************Pair #3*********************
**********************************************/
__global__
void addition5_FP32(int n, int iterateNum, float *x, float *y) {
int thread = blockIdx.x*blockDim.x + threadIdx.x;
volatile float a = x[thread];
volatile float b = 1000;
volatile float c = 1000;
for (int i = 0; i < iterateNum; i++) {
b = a + i;
c = a + b;
a = c + a;
}
x[thread] = a;
}
__global__
void addition6_FP32(int n, int iterateNum, float *x, float *y) {
int thread = blockIdx.x*blockDim.x + threadIdx.x;
volatile float a = x[thread];
volatile float b = 1000;
volatile float c = 1000;
for (int i = 0; i < iterateNum; i++) {
b = a + i;
c = a + b;
a = c + a;
c = b + a;
b = c + a;
a = b + c;
}
x[thread] = a;
}
/*********************************************
**********************************************
**********************************************/
__global__
void createData(int n, float *x, float *y, float a, float b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
x[i] = a;
y[i] = b;
}
}
void HANDLE_ERROR(hipError_t e) {
if (e != hipSuccess) {
printf("cuda Error: \"%s\"\n", hipGetErrorString(e));
}
}
void runAnalysis(const char *outputName,
void gpuFunc(int, int, float *, float *),
int n, int iterateNum, float *d_x, float *d_y) {
float time;
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
nvmlAPIRun(outputName);
HANDLE_ERROR( hipEventRecord(start) );
hipLaunchKernelGGL(( gpuFunc), dim3((n+255)/256), dim3(256), 0, 0, n, iterateNum, d_x, d_y);
HANDLE_ERROR( hipEventRecord(stop, 0) );
HANDLE_ERROR( hipEventSynchronize(stop) );
HANDLE_ERROR( hipEventElapsedTime(&time, start, stop) );
nvmlAPIEnd();
FILE *fp = fopen(outputName, "r+");
if (fp == NULL) {
printf("Attempt at writing 'time Elapsed' in '%s' failed. Error: ", outputName);
perror("");
printf("Terminating...");
exit(0);
}
fseek(fp, 30, SEEK_SET);
fprintf(fp, "timeElapsed:, %3.1f\n", time);
fclose(fp);
printf("Algorithm finished, results saved in %s\n", outputName);
}
int main(int argc, char* argv[])
{
if (argc !=3) {
printf("expected 2 int or float arguments. Quitting.\n");
exit(0);
}
int N = 1<<18;
int iterateNum = 10000000;
// int deviceId = 0;
// hipError_t cudaRet;
// hipDeviceProp_t deviceProp;
// cudaRet = hipGetDeviceProperties ( &deviceProp, deviceId );
// if (cudaRet != hipSuccess) {
// printf("get deviceProp failed: %s\n", hipGetErrorString(cudaRet));
// }
// int numBlocks = 360 * deviceProp.multiProcessorCount;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
HANDLE_ERROR( hipMalloc(&d_x, N*sizeof(float)) );
HANDLE_ERROR( hipMalloc(&d_y, N*sizeof(float)) );
hipLaunchKernelGGL(( createData), dim3((N+255)/256), dim3(256), 0, 0, N, d_x, d_y, atof(argv[1]), atof(argv[2]));
runAnalysis("Power_data_add32_Alg5.txt", addition5_FP32, N, iterateNum, d_x, d_y);
HANDLE_ERROR( hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost) );
runAnalysis("Power_data_add32_Alg6.txt", addition6_FP32, N, iterateNum, d_x, d_y);
HANDLE_ERROR( hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost) );
HANDLE_ERROR( hipFree(d_x) );
HANDLE_ERROR( hipFree(d_y) );
free(x);
free(y);
}
| abb88caae3c1f4094290baa1573d0ac5f25dae38.cu | // #include <stdio.h>
// #include <nvml.h>
#include "nvmlPower.hpp"
#include "nvmlPower.cpp"
//run command:
//nvcc additionFP32.cu -L/usr/lib64/nvidia -lnvidia-ml -lpthread -I/usr/local/cuda-7.0/samples/common/inc/ -I/nvmlPower.cpp
/*********************************************
******************Pair #1*********************
**********************************************/
/* Ideas:
keep input small-ish
loop
don't read much data
difference should only be additions, nothing else
*/
__global__
void addition1_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
}
}
__global__
void addition2_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
//6 more additions?:
//no extra lookups
//l-1, l-2 already calculated, so no subtraction needed
//no new stores after optimization:
// x[l] store will only happen once, not 3 times.
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
x[l] = x[l] + y[l] + y[l-1] + y[l-2];
}
}
/*********************************************
******************Pair #2*********************
**********************************************/
/*
Loop:
x[i] = y[i]
---vs.---
loop2:
x[i] = + y[i] + y[i] + y[i]
*/
__global__
void addition3_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = y[l];
}
}
__global__
void addition4_FP32(int n, int iterateNum, float *x, float *y) {
int start = blockIdx.x*blockDim.x + threadIdx.x;
int l = 0;
for (int i = start; i < iterateNum /*&& i < n && i >= 0*/; i++) {
l = (l+1) % n;
x[l] = y[l] + y[l] + y[l];
}
}
/*********************************************
******************Pair #3*********************
**********************************************/
__global__
void addition5_FP32(int n, int iterateNum, float *x, float *y) {
int thread = blockIdx.x*blockDim.x + threadIdx.x;
volatile float a = x[thread];
volatile float b = 1000;
volatile float c = 1000;
for (int i = 0; i < iterateNum; i++) {
b = a + i;
c = a + b;
a = c + a;
}
x[thread] = a;
}
__global__
void addition6_FP32(int n, int iterateNum, float *x, float *y) {
int thread = blockIdx.x*blockDim.x + threadIdx.x;
volatile float a = x[thread];
volatile float b = 1000;
volatile float c = 1000;
for (int i = 0; i < iterateNum; i++) {
b = a + i;
c = a + b;
a = c + a;
c = b + a;
b = c + a;
a = b + c;
}
x[thread] = a;
}
/*********************************************
**********************************************
**********************************************/
__global__
void createData(int n, float *x, float *y, float a, float b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
x[i] = a;
y[i] = b;
}
}
void HANDLE_ERROR(cudaError_t e) {
if (e != cudaSuccess) {
printf("cuda Error: \"%s\"\n", cudaGetErrorString(e));
}
}
void runAnalysis(const char *outputName,
void gpuFunc(int, int, float *, float *),
int n, int iterateNum, float *d_x, float *d_y) {
float time;
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
nvmlAPIRun(outputName);
HANDLE_ERROR( cudaEventRecord(start) );
gpuFunc<<<(n+255)/256, 256>>>(n, iterateNum, d_x, d_y);
HANDLE_ERROR( cudaEventRecord(stop, 0) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&time, start, stop) );
nvmlAPIEnd();
FILE *fp = fopen(outputName, "r+");
if (fp == NULL) {
printf("Attempt at writing 'time Elapsed' in '%s' failed. Error: ", outputName);
perror("");
printf("Terminating...");
exit(0);
}
fseek(fp, 30, SEEK_SET);
fprintf(fp, "timeElapsed:, %3.1f\n", time);
fclose(fp);
printf("Algorithm finished, results saved in %s\n", outputName);
}
int main(int argc, char* argv[])
{
if (argc !=3) {
printf("expected 2 int or float arguments. Quitting.\n");
exit(0);
}
int N = 1<<18;
int iterateNum = 10000000;
// int deviceId = 0;
// cudaError_t cudaRet;
// cudaDeviceProp deviceProp;
// cudaRet = cudaGetDeviceProperties ( &deviceProp, deviceId );
// if (cudaRet != cudaSuccess) {
// printf("get deviceProp failed: %s\n", cudaGetErrorString(cudaRet));
// }
// int numBlocks = 360 * deviceProp.multiProcessorCount;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
HANDLE_ERROR( cudaMalloc(&d_x, N*sizeof(float)) );
HANDLE_ERROR( cudaMalloc(&d_y, N*sizeof(float)) );
createData<<<(N+255)/256, 256>>>(N, d_x, d_y, atof(argv[1]), atof(argv[2]));
runAnalysis("Power_data_add32_Alg5.txt", addition5_FP32, N, iterateNum, d_x, d_y);
HANDLE_ERROR( cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost) );
runAnalysis("Power_data_add32_Alg6.txt", addition6_FP32, N, iterateNum, d_x, d_y);
HANDLE_ERROR( cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaFree(d_x) );
HANDLE_ERROR( cudaFree(d_y) );
free(x);
free(y);
}
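// Hedged post-processing sketch (not in the original benchmark): addition6_FP32 issues
// 6 dependent FP32 additions per loop iteration in every thread, so the achieved add
// rate can be estimated from the elapsed kernel time. The helper name
// reportAddThroughput and its addsPerIter argument are illustrative assumptions.
void reportAddThroughput(long long nThreads, long long iterateNum, int addsPerIter, float timeMs) {
    double totalAdds = (double)nThreads * (double)iterateNum * (double)addsPerIter;
    double gflops = totalAdds / ((double)timeMs * 1.0e6); // ms -> s, then divide by 1e9
    printf("approx. %.2f GFLOP/s of FP32 additions\n", gflops);
}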
|
floyd_gpu.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
// C++ Program for Floyd Warshall Algorithm
//#include <bits/stdc++.h>
#include <sys/time.h>
using namespace std;
#include <hip/hip_runtime.h>
/* Define Infinite as a large enough
value.This value will be used for
vertices not connected to each other */
#define INF 99999
#define DEBUG 0
// A function to print the solution matrix
void printSolution(int ** dist, int nNodes);
double GetTime(void)
{
struct timeval time;
double Time;
gettimeofday(&time, (struct timezone *) NULL);
Time = ((double)time.tv_sec*1000000.0 + (double)time.tv_usec);
return(Time);
}
// Solves the all-pairs shortest path
// problem using Floyd Warshall algorithm
__global__ void floydWarshall_kernel(int *dist, int nNodes, int k) {
    /* Add all vertices one by one to
    the set of intermediate vertices.
    ---> Before start of an iteration,
    we have shortest distances between all
    pairs of vertices such that the
    shortest distances consider only the
    vertices in set {0, 1, 2, .. k-1} as
    intermediate vertices.
    ----> After the end of an iteration,
    vertex no. k is added to the set of
    intermediate vertices and the set becomes {0, 1, 2, .. k}.
    The k iterations depend on each other, so the host drives the loop over k
    and each launch relaxes every (i, j) pair through intermediate vertex k. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= nNodes) return;
    if (j >= nNodes) return;
    // If vertex k is on the shortest path from
    // i to j, then update the value of dist[i][j]
    if (dist[i * nNodes + k] + dist[k * nNodes + j] < dist[i * nNodes + j])
        dist[i * nNodes + j] = dist[i * nNodes + k] + dist[k * nNodes + j];
}
int main(int argc, char **argv){
double timeElapsed, clockBegin;
int** graph;
int a, b, w, nNodes;
if (argc < 2) {
cout << "Usage: ./" << argv[0] << " <graph>" << endl;
exit(-1);
}
ifstream inputfile(argv[1]);
inputfile >> nNodes;
graph = new int*[nNodes];
for (int i = 0; i < nNodes; ++i)
{
graph[i] = new int[nNodes];
for (int j = 0; j < nNodes; ++j)
graph[i][j] = INF;
}
while (inputfile >> a >> b >> w)
{
graph[a][b] = w;
graph[b][a] = w;
}
/* Floyd initialization */
int **dist, *dist_d;
dist = new int*[nNodes];
for (int i = 0; i < nNodes; ++i)
dist[i] = new int[nNodes];
/* dist[][] will be the output matrix
that will finally have the shortest
distances between every pair of vertices */
/* Initialize the solution matrix same
as input graph matrix. Or we can say
the initial values of shortest distances
are based on shortest paths considering
no intermediate vertex. */
for (int i = 0; i < nNodes; i++)
for (int j = 0; j < nNodes; j++)
dist[i][j] = graph[i][j];
hipMalloc(&dist_d, nNodes * nNodes * sizeof(int));
for (int i = 0; i < nNodes; i++)
hipMemcpy(&dist_d[i * nNodes], dist[i], nNodes * sizeof(int), hipMemcpyHostToDevice);
/* Floyd execution */
clockBegin = GetTime();
dim3 blockSize(32, 32);
dim3 numBlocks((nNodes + blockSize.x - 1) / blockSize.x, (nNodes + blockSize.y - 1) / blockSize.y);
    // k must advance sequentially; launches on the default stream are ordered,
    // so no per-iteration synchronization is needed here
    for (int k = 0; k < nNodes; k++)
        hipLaunchKernelGGL(( floydWarshall_kernel), dim3(numBlocks), dim3(blockSize), 0, 0, dist_d, nNodes, k);
hipDeviceSynchronize();
timeElapsed = (GetTime() - clockBegin)/1000000;
for (int i = 0; i < nNodes; i++)
hipMemcpy(dist[i], &dist_d[i * nNodes], nNodes * sizeof(int), hipMemcpyDeviceToHost);
#if DEBUG == 1
// Print the shortest distance matrix
printSolution(dist, nNodes);
#endif
printf("%5lf\n", timeElapsed);
for (int i = 0; i < nNodes; i++) {
        delete[] graph[i];
        delete[] dist[i];
}
hipFree(dist_d);
return 0;
}
/* A utility function to print solution */
void printSolution(int **dist, int nNodes)
{
for (int i = 0; i < nNodes; i++)
{
for (int j = 0; j < nNodes; j++)
{
if (dist[i][j] == INF)
cout<<"INF"<<" ";
else
cout<<dist[i][j]<<" ";
}
cout<<endl;
}
}
// This code is contributed by rathbhupendra
| floyd_gpu.cu | #include <fstream>
#include <iostream>
// C++ Program for Floyd Warshall Algorithm
//#include <bits/stdc++.h>
#include <sys/time.h>
using namespace std;
#include <cuda_runtime.h>
/* Define Infinite as a large enough
value.This value will be used for
vertices not connected to each other */
#define INF 99999
#define DEBUG 0
// A function to print the solution matrix
void printSolution(int ** dist, int nNodes);
double GetTime(void)
{
struct timeval time;
double Time;
gettimeofday(&time, (struct timezone *) NULL);
Time = ((double)time.tv_sec*1000000.0 + (double)time.tv_usec);
return(Time);
}
// Solves the all-pairs shortest path
// problem using Floyd Warshall algorithm
__global__ void floydWarshall_kernel(int *dist, int nNodes, int k) {
    /* Add all vertices one by one to
    the set of intermediate vertices.
    ---> Before start of an iteration,
    we have shortest distances between all
    pairs of vertices such that the
    shortest distances consider only the
    vertices in set {0, 1, 2, .. k-1} as
    intermediate vertices.
    ----> After the end of an iteration,
    vertex no. k is added to the set of
    intermediate vertices and the set becomes {0, 1, 2, .. k}.
    The k iterations depend on each other, so the host drives the loop over k
    and each launch relaxes every (i, j) pair through intermediate vertex k. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= nNodes) return;
    if (j >= nNodes) return;
    // If vertex k is on the shortest path from
    // i to j, then update the value of dist[i][j]
    if (dist[i * nNodes + k] + dist[k * nNodes + j] < dist[i * nNodes + j])
        dist[i * nNodes + j] = dist[i * nNodes + k] + dist[k * nNodes + j];
}
int main(int argc, char **argv){
double timeElapsed, clockBegin;
int** graph;
int a, b, w, nNodes;
if (argc < 2) {
cout << "Usage: ./" << argv[0] << " <graph>" << endl;
exit(-1);
}
ifstream inputfile(argv[1]);
inputfile >> nNodes;
graph = new int*[nNodes];
for (int i = 0; i < nNodes; ++i)
{
graph[i] = new int[nNodes];
for (int j = 0; j < nNodes; ++j)
graph[i][j] = INF;
}
while (inputfile >> a >> b >> w)
{
graph[a][b] = w;
graph[b][a] = w;
}
/* Floyd initialization */
int **dist, *dist_d;
dist = new int*[nNodes];
for (int i = 0; i < nNodes; ++i)
dist[i] = new int[nNodes];
/* dist[][] will be the output matrix
that will finally have the shortest
distances between every pair of vertices */
/* Initialize the solution matrix same
as input graph matrix. Or we can say
the initial values of shortest distances
are based on shortest paths considering
no intermediate vertex. */
for (int i = 0; i < nNodes; i++)
for (int j = 0; j < nNodes; j++)
dist[i][j] = graph[i][j];
cudaMalloc(&dist_d, nNodes * nNodes * sizeof(int));
for (int i = 0; i < nNodes; i++)
cudaMemcpy(&dist_d[i * nNodes], dist[i], nNodes * sizeof(int), cudaMemcpyHostToDevice);
/* Floyd execution */
clockBegin = GetTime();
dim3 blockSize(32, 32);
dim3 numBlocks((nNodes + blockSize.x - 1) / blockSize.x, (nNodes + blockSize.y - 1) / blockSize.y);
    // k must advance sequentially; launches on the default stream are ordered,
    // so no per-iteration synchronization is needed here
    for (int k = 0; k < nNodes; k++)
        floydWarshall_kernel<<<numBlocks, blockSize>>>(dist_d, nNodes, k);
cudaDeviceSynchronize();
timeElapsed = (GetTime() - clockBegin)/1000000;
for (int i = 0; i < nNodes; i++)
cudaMemcpy(dist[i], &dist_d[i * nNodes], nNodes * sizeof(int), cudaMemcpyDeviceToHost);
#if DEBUG == 1
// Print the shortest distance matrix
printSolution(dist, nNodes);
#endif
printf("%5lf\n", timeElapsed);
for (int i = 0; i < nNodes; i++) {
        delete[] graph[i];
        delete[] dist[i];
}
cudaFree(dist_d);
return 0;
}
/* A utility function to print solution */
void printSolution(int **dist, int nNodes)
{
for (int i = 0; i < nNodes; i++)
{
for (int j = 0; j < nNodes; j++)
{
if (dist[i][j] == INF)
cout<<"INF"<<" ";
else
cout<<dist[i][j]<<" ";
}
cout<<endl;
}
}
// This code is contributed by rathbhupendra
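/* Hedged validation sketch (not part of the original program): a plain sequential
   Floyd-Warshall that could be run on small graphs to cross-check the GPU result.
   The function name floydWarshallCPU is an illustrative choice. */
void floydWarshallCPU(int **dist, int nNodes)
{
    for (int k = 0; k < nNodes; k++)
        for (int i = 0; i < nNodes; i++)
            for (int j = 0; j < nNodes; j++)
                if (dist[i][k] + dist[k][j] < dist[i][j])
                    dist[i][j] = dist[i][k] + dist[k][j];
}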
|
034b147d85431b5af2a3c2a2624b8c74968b0900.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <strings/utilities.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/logical.h>
#include <map>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Units for timestamp conversion.
* These are defined since there are more than what cudf supports.
*/
enum class timestamp_units {
years, ///< precision is years
months, ///< precision is months
days, ///< precision is days
hours, ///< precision is hours
minutes, ///< precision is minutes
seconds, ///< precision is seconds
ms, ///< precision is milliseconds
us, ///< precision is microseconds
ns ///< precision is nanoseconds
};
// used to index values in a timeparts array
enum timestamp_parse_component {
TP_YEAR = 0,
TP_MONTH = 1,
TP_DAY = 2,
TP_DAY_OF_YEAR = 3,
TP_HOUR = 4,
TP_MINUTE = 5,
TP_SECOND = 6,
TP_SUBSECOND = 7,
TP_TZ_MINUTES = 8,
TP_ARRAYSIZE = 9
};
enum class format_char_type : int8_t {
literal, // literal char type passed through
specifier // timestamp format specifier
};
/**
* @brief Represents a format specifier or literal from a timestamp format string.
*
* Created by the format_compiler when parsing a format string.
*/
struct alignas(4) format_item {
format_char_type item_type; // specifier or literal indicator
char value; // specifier or literal value
int8_t length; // item length in bytes
static format_item new_specifier(char format_char, int8_t length)
{
return format_item{format_char_type::specifier, format_char, length};
}
static format_item new_delimiter(char literal)
{
return format_item{format_char_type::literal, literal, 1};
}
};
/**
* @brief The format_compiler parses a timestamp format string into a vector of
* format_items.
*
* The vector of format_items are used when parsing a string into timestamp
* components and when formatting a string from timestamp components.
*/
struct format_compiler {
std::string format;
std::string template_string;
rmm::device_uvector<format_item> d_items;
std::map<char, int8_t> specifier_lengths = {{'Y', 4},
{'y', 2},
{'m', 2},
{'d', 2},
{'H', 2},
{'I', 2},
{'M', 2},
{'S', 2},
{'f', 6},
{'z', 5},
{'Z', 3},
{'p', 2},
{'j', 3}};
format_compiler(const char* fmt, rmm::cuda_stream_view stream) : format(fmt), d_items(0, stream)
{
std::vector<format_item> items;
const char* str = format.c_str();
auto length = format.length();
while (length > 0) {
char ch = *str++;
length--;
if (ch != '%') {
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");
ch = *str++;
length--;
if (ch == '%') // escaped % char
{
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
if (ch >= '0' && ch <= '9') {
CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
specifier_lengths[*str] = static_cast<int8_t>(ch - '0');
ch = *str++;
length--;
}
CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(),
"invalid format specifier: " + std::string(1, ch));
int8_t spec_length = specifier_lengths[ch];
items.push_back(format_item::new_specifier(ch, spec_length));
template_string.append((size_t)spec_length, ch);
}
// create program in device memory
d_items.resize(items.size(), stream);
CUDA_TRY(hipMemcpyAsync(d_items.data(),
items.data(),
items.size() * sizeof(items[0]),
hipMemcpyHostToDevice,
stream.value()));
}
format_item const* format_items() { return d_items.data(); }
size_type template_bytes() const { return static_cast<size_type>(template_string.size()); }
size_type items_count() const { return static_cast<size_type>(d_items.size()); }
int8_t subsecond_precision() const { return specifier_lengths.at('f'); }
};
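// Worked example (illustrative, not from the original source): compiling the format
// "%Y-%m-%dT%H:%M:%SZ" yields the item sequence
//   specifier 'Y' (len 4), literal '-', specifier 'm' (2), literal '-',
//   specifier 'd' (2), literal 'T', specifier 'H' (2), literal ':',
//   specifier 'M' (2), literal ':', specifier 'S' (2), specifier 'Z' (3)
// and template_string "YYYY-mm-ddTHH:MM:SSZZZ", since each specifier is repeated to
// its length when the template is built.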
// this parses date/time characters into a timestamp integer
template <typename T> // timestamp type
struct parse_datetime {
column_device_view const d_strings;
format_item const* d_format_items;
size_type items_count;
timestamp_units units;
int8_t subsecond_precision;
/**
* @brief Return power of ten value given an exponent.
*
* @return `1x10^exponent` for `0 <= exponent <= 9`
*/
__device__ constexpr int64_t power_of_ten(int32_t exponent)
{
constexpr int64_t powers_of_ten[] = {
1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
return powers_of_ten[exponent];
}
//
__device__ int32_t str2int(const char* str, size_type bytes)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value;
}
// Walk the format_items to read the datetime string.
// Returns 0 if all ok.
__device__ int parse_into_parts(string_view const& d_string, int32_t* timeparts)
{
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.value != 'f')
item.length = static_cast<int8_t>(::min(static_cast<size_type>(item.length), length));
if (item.item_type == format_char_type::literal) {
// static character we'll just skip;
// consume item.length bytes from string
ptr += item.length;
length -= item.length;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': timeparts[TP_YEAR] = str2int(ptr, item.length); break;
case 'y': {
auto const year = str2int(ptr, item.length);
timeparts[TP_YEAR] = year + (year < 69 ? 2000 : 1900);
break;
}
case 'm': timeparts[TP_MONTH] = str2int(ptr, item.length); break;
case 'd': timeparts[TP_DAY] = str2int(ptr, item.length); break;
case 'j': timeparts[TP_DAY_OF_YEAR] = str2int(ptr, item.length); break;
case 'H':
case 'I': timeparts[TP_HOUR] = str2int(ptr, item.length); break;
case 'M': timeparts[TP_MINUTE] = str2int(ptr, item.length); break;
case 'S': timeparts[TP_SECOND] = str2int(ptr, item.length); break;
case 'f': {
int32_t const read_size =
::min(static_cast<int32_t>(item.length), static_cast<int32_t>(length));
int64_t const fraction = str2int(ptr, read_size) * power_of_ten(item.length - read_size);
timeparts[TP_SUBSECOND] = static_cast<int32_t>(fraction);
break;
}
case 'p': {
string_view am_pm(ptr, 2);
auto hour = timeparts[TP_HOUR];
if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
if (hour == 12) hour = 0;
} else if (hour < 12)
hour += 12;
timeparts[TP_HOUR] = hour;
break;
}
case 'z': {
int sign = *ptr == '-' ? 1 : -1; // revert timezone back to UTC
int hh = str2int(ptr + 1, 2);
int mm = str2int(ptr + 3, 2);
// ignoring the rest for now
// item.length has how many chars we should read
timeparts[TP_TZ_MINUTES] = sign * ((hh * 60) + mm);
break;
}
case 'Z': break; // skip
default: return 3;
}
ptr += item.length;
length -= item.length;
}
return 0;
}
__device__ int64_t timestamp_from_parts(int32_t const* timeparts, timestamp_units units)
{
auto year = timeparts[TP_YEAR];
if (units == timestamp_units::years) return year - 1970;
auto month = timeparts[TP_MONTH];
if (units == timestamp_units::months)
return ((year - 1970) * 12) + (month - 1); // months are 1-12, need to 0-base it here
auto day = timeparts[TP_DAY];
auto ymd = // convenient chrono class handles the leap year calculations for us
cuda::std::chrono::year_month_day(cuda::std::chrono::year{year},
cuda::std::chrono::month{static_cast<uint32_t>(month)},
cuda::std::chrono::day{static_cast<uint32_t>(day)});
int32_t days = cuda::std::chrono::sys_days(ymd).time_since_epoch().count();
if (units == timestamp_units::days) return days;
auto tzadjust = timeparts[TP_TZ_MINUTES]; // in minutes
auto hour = timeparts[TP_HOUR];
if (units == timestamp_units::hours) return (days * 24L) + hour + (tzadjust / 60);
auto minute = timeparts[TP_MINUTE];
if (units == timestamp_units::minutes)
return static_cast<int64_t>(days * 24L * 60L) + (hour * 60L) + minute + tzadjust;
auto second = timeparts[TP_SECOND];
int64_t timestamp =
(days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust * 60);
if (units == timestamp_units::seconds) return timestamp;
int64_t subsecond =
timeparts[TP_SUBSECOND] * power_of_ten(9 - subsecond_precision); // normalize to nanoseconds
if (units == timestamp_units::ms) {
timestamp *= 1000L;
subsecond = subsecond / 1000000L;
} else if (units == timestamp_units::us) {
timestamp *= 1000000L;
subsecond = subsecond / 1000L;
} else if (units == timestamp_units::ns)
timestamp *= 1000000000L;
timestamp += subsecond;
return timestamp;
}
__device__ T operator()(size_type idx)
{
T epoch_time{typename T::duration{0}};
if (d_strings.is_null(idx)) return epoch_time;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return epoch_time;
//
int32_t timeparts[TP_ARRAYSIZE] = {1970, 1, 1}; // month and day are 1-based
if (parse_into_parts(d_str, timeparts)) return epoch_time; // unexpected parse case
//
return T{T::duration(timestamp_from_parts(timeparts, units))};
}
};
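// For illustration: with the format "%Y-%m-%dT%H:%M:%S", parse_into_parts reads "2019-03-09T14:20:35"
// into timeparts {YEAR=2019, MONTH=3, DAY=9, HOUR=14, MINUTE=20, SECOND=35}, and
// timestamp_from_parts then converts those parts to days via year_month_day before scaling the
// result to the requested unit (days, seconds, ms, us or ns).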
// convert cudf type to timestamp units
struct dispatch_timestamp_to_units_fn {
template <typename T>
timestamp_units operator()()
{
CUDF_FAIL("Invalid type for timestamp conversion.");
}
};
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_D>()
{
return timestamp_units::days;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_s>()
{
return timestamp_units::seconds;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ms>()
{
return timestamp_units::ms;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_us>()
{
return timestamp_units::us;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ns>()
{
return timestamp_units::ns;
}
// dispatch operator to map timestamp to native fixed-width-type
struct dispatch_to_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_strings,
std::string const& format,
timestamp_units units,
mutable_column_view& results_view,
rmm::cuda_stream_view stream) const
{
format_compiler compiler(format.c_str(), stream);
auto d_items = compiler.format_items();
auto d_results = results_view.data<T>();
parse_datetime<T> pfn{
d_strings, d_items, compiler.items_count(), units, compiler.subsecond_precision()};
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(results_view.size()),
d_results,
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
std::string const&,
timestamp_units,
mutable_column_view&,
rmm::cuda_stream_view) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_timestamp_column(timestamp_type, 0);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units = cudf::type_dispatcher(timestamp_type, dispatch_timestamp_to_units_fn());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto results = make_timestamp_column(timestamp_type,
strings_count,
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
cudf::type_dispatcher(
timestamp_type, dispatch_to_timestamps_fn(), d_column, format, units, results_view, stream);
results->set_null_count(strings.null_count());
return results;
}
/**
* @brief Functor checks the strings against the given format items.
*
* This does no data conversion.
*/
struct check_datetime_format {
column_device_view const d_strings;
format_item const* d_format_items;
size_type items_count;
/**
* @brief Check the specified characters are between ['0','9'].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @return true if all digits are 0-9
*/
__device__ bool check_digits(const char* str, size_type bytes)
{
return thrust::all_of(thrust::seq, str, str + bytes, [] __device__(char chr) {
return (chr >= '0' && chr <= '9');
});
}
/**
* @brief Specialized function to return the value and check for non-decimal characters.
*
* If non-decimal characters are found within `str` and `str + bytes` then
* the returned result is `thrust::nullopt` (_does not contain a value_).
* Otherwise, the parsed integer result is returned.
*
* @param str Beginning of characters to read/check.
* @param bytes Number of bytes in str to read/check.
* @return Integer value if characters are valid.
*/
__device__ thrust::optional<int32_t> str2int(const char* str, size_type bytes)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') return thrust::nullopt;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value;
}
/**
* @brief Check the specified characters are between ['0','9']
* and the resulting integer is within [`min_value`, `max_value`].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @param min_value Inclusive minimum value
* @param max_value Inclusive maximum value
* @return true if parsed value is between `min_value` and `max_value`.
*/
__device__ bool check_value(const char* str, size_type bytes, int min_value, int max_value)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') return false;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value >= min_value && value <= max_value;
}
/**
* @brief Check the string matches the format.
*
* Walk the `format_items` as we read the string characters
* checking the characters are valid for each format specifier.
* The checking here is a little more strict than the actual
* parser used for conversion.
*/
__device__ bool check_string(string_view const& d_string, int32_t* dateparts)
{
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
// eliminate static character values first
if (item.item_type == format_char_type::literal) {
// check static character matches
if (*ptr != item.value) return false;
ptr += item.length;
length -= item.length;
continue;
}
// allow for specifiers to be truncated
if (item.value != 'f')
item.length = static_cast<int8_t>(::min(static_cast<size_type>(item.length), length));
// special logic for each specifier
// reference: https://man7.org/linux/man-pages/man3/strptime.3.html
bool result = false;
switch (item.value) {
case 'Y': {
if (auto value = str2int(ptr, item.length)) {
result = true;
dateparts[TP_YEAR] = value.value();
}
break;
}
case 'y': {
if (auto value = str2int(ptr, item.length)) {
result = true;
auto const year = value.value();
dateparts[TP_YEAR] = year + (year < 69 ? 2000 : 1900);
}
break;
}
case 'm': {
if (auto value = str2int(ptr, item.length)) {
result = true;
dateparts[TP_MONTH] = value.value();
}
break;
}
case 'd': {
if (auto value = str2int(ptr, item.length)) {
result = true;
dateparts[TP_DAY] = value.value();
}
break;
}
case 'j': result = check_value(ptr, item.length, 1, 366); break;
case 'H': result = check_value(ptr, item.length, 0, 23); break;
case 'I': result = check_value(ptr, item.length, 1, 12); break;
case 'M': result = check_value(ptr, item.length, 0, 59); break;
case 'S': result = check_value(ptr, item.length, 0, 60); break;
case 'f': {
result = check_digits(ptr, ::min(static_cast<int32_t>(item.length), length));
break;
}
case 'p': {
if (item.length == 2) {
string_view am_pm(ptr, 2);
result = (am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0) ||
(am_pm.compare("PM", 2) == 0) || (am_pm.compare("pm", 2) == 0);
}
break;
}
case 'z': { // timezone offset
if (item.length == 5) {
result = (*ptr == '-' || *ptr == '+') && // sign
check_value(ptr + 1, 2, 0, 23) && // hour
check_value(ptr + 3, 2, 0, 59); // minute
}
break;
}
case 'Z': result = true; // skip
default: break;
}
if (!result) return false;
ptr += item.length;
length -= item.length;
}
return true;
}
__device__ bool operator()(size_type idx)
{
if (d_strings.is_null(idx)) return false;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return false;
int32_t dateparts[] = {1970, 1, 1}; // year, month, day
if (!check_string(d_str, dateparts)) return false;
auto year = dateparts[TP_YEAR];
auto month = static_cast<uint32_t>(dateparts[TP_MONTH]);
auto day = static_cast<uint32_t>(dateparts[TP_DAY]);
return cuda::std::chrono::year_month_day(cuda::std::chrono::year{year},
cuda::std::chrono::month{month},
cuda::std::chrono::day{day})
.ok();
}
};
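// For illustration: with the format "%Y-%m-%d" this check accepts "2020-02-29" (2020 is a leap
// year, so year_month_day(...).ok() is true) but rejects "2020-13-01" (month out of range) and
// "2020-1x-01" (non-digit inside a numeric specifier).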
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& strings,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::BOOL8});
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto results = make_numeric_column(data_type{type_id::BOOL8},
strings_count,
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
format_compiler compiler(format.c_str(), stream);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
check_datetime_format{d_strings, compiler.format_items(), compiler.items_count()});
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_timestamps(strings, timestamp_type, format, rmm::cuda_stream_default, mr);
}
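// Illustrative call (sketch only; assumes the public header defaults the memory-resource argument
// and that strings_col is an existing column_view of strings):
//   auto ts = cudf::strings::to_timestamps(cudf::strings_column_view{strings_col},
//                                          cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS},
//                                          "%Y-%m-%d %H:%M:%S");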
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& strings,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_timestamp(strings, format, rmm::cuda_stream_default, mr);
}
namespace detail {
namespace {
// converts a timestamp into date-time string
template <typename T>
struct datetime_formatter {
const column_device_view d_timestamps;
const format_item* d_format_items;
size_type items_count;
timestamp_units units;
const int32_t* d_offsets;
char* d_chars;
__device__ cudf::timestamp_D::duration convert_to_days(int64_t timestamp, timestamp_units units)
{
using namespace cuda::std::chrono;
using minutes = duration<timestamp_s::rep, minutes::period>;
using hours = duration<timestamp_s::rep, hours::period>;
switch (units) {
case timestamp_units::minutes: return floor<days>(minutes(timestamp));
case timestamp_units::seconds: return floor<days>(cudf::timestamp_s::duration(timestamp));
case timestamp_units::hours: return floor<days>(hours(timestamp));
case timestamp_units::ms: return floor<days>(cudf::timestamp_ms::duration(timestamp));
case timestamp_units::us: return floor<days>(cudf::timestamp_us::duration(timestamp));
case timestamp_units::ns: return floor<days>(cudf::timestamp_ns::duration(timestamp));
default: return cudf::timestamp_D::duration(timestamp);
}
}
// divide timestamp integer into time components (year, month, day, etc)
// TODO call the cuda::std::chrono methods here instead when they are ready
__device__ void dissect_timestamp(int64_t timestamp, int32_t* timeparts)
{
if (units == timestamp_units::years) {
timeparts[TP_YEAR] = static_cast<int32_t>(timestamp) + 1970;
timeparts[TP_MONTH] = 1;
timeparts[TP_DAY] = 1;
return;
}
// Specialized modulo expression that handles negative values.
// Examples:
// modulo(1,60) 1
// modulo(-1,60) 59
auto modulo_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>(((time % base) + base) % base);
};
// This function handles converting units by dividing and adjusting for negative values.
// Examples:
// scale(-61,60) -2
// scale(-60,60) -1
// scale(-59,60) -1
// scale( 59,60) 0
// scale( 60,60) 1
// scale( 61,60) 1
auto scale_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>((time - ((time < 0) * (base - 1L))) / base);
};
if (units == timestamp_units::months) {
int32_t month = modulo_time(timestamp, 12);
int32_t year = scale_time(timestamp, 12) + 1970;
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month + 1; // months start at 1 and not 0
timeparts[TP_DAY] = 1;
return;
}
// first, convert to days so we can handle months, years, day of the year.
auto const days = convert_to_days(timestamp, units);
auto const ymd = cuda::std::chrono::year_month_day(cuda::std::chrono::sys_days(days));
auto const year = static_cast<int32_t>(ymd.year());
auto const month = static_cast<unsigned>(ymd.month());
auto const day = static_cast<unsigned>(ymd.day());
int32_t const monthDayOffset[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
timeparts[TP_DAY_OF_YEAR] =
day + monthDayOffset[month - 1] + (month > 2 and ymd.year().is_leap());
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month;
timeparts[TP_DAY] = day;
if (units == timestamp_units::days) return;
// done with date, now work on time
if (units == timestamp_units::hours) {
timeparts[TP_HOUR] = modulo_time(timestamp, 24);
return;
}
if (units == timestamp_units::minutes) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 60), 24);
timeparts[TP_MINUTE] = modulo_time(timestamp, 60);
return;
}
if (units == timestamp_units::seconds) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
return;
}
// common utility for setting time components from a subsecond unit value
auto subsecond_fn = [&](int64_t subsecond_base) {
auto subsecond = modulo_time(timestamp, subsecond_base);
timestamp = timestamp / subsecond_base - ((timestamp < 0) and (subsecond != 0));
timeparts[TP_SUBSECOND] = subsecond;
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
};
if (units == timestamp_units::ms)
subsecond_fn(1000);
else if (units == timestamp_units::us)
subsecond_fn(1000000);
else
subsecond_fn(1000000000);
}
// utility to create 0-padded integers (up to 9 chars)
__device__ char* int2str(char* str, int bytes, int val)
{
char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
char* ptr = tmpl;
while (val > 0) {
int digit = val % 10;
*ptr++ = '0' + digit;
val = val / 10;
}
ptr = tmpl + bytes - 1;
while (bytes-- > 0) *str++ = *ptr--;
return str;
}
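// For illustration: int2str(out, 2, 7) writes "07" and int2str(out, 4, 123) writes "0123",
// returning a pointer just past the last character written.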
__device__ char* format_from_parts(int32_t const* timeparts, char* ptr)
{
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.item_type == format_char_type::literal) {
*ptr++ = item.value;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': // 4-digit year
ptr = int2str(ptr, item.length, timeparts[TP_YEAR]);
break;
case 'y': // 2-digit year
{
auto year = timeparts[TP_YEAR];
// remove hundredths digits and above
ptr = int2str(ptr, item.length, year - ((year / 100) * 100));
break;
}
case 'm': // month
ptr = int2str(ptr, item.length, timeparts[TP_MONTH]);
break;
case 'd': // day of month
ptr = int2str(ptr, item.length, timeparts[TP_DAY]);
break;
case 'j': // day of year
ptr = int2str(ptr, item.length, timeparts[TP_DAY_OF_YEAR]);
break;
case 'H': // 24-hour
ptr = int2str(ptr, item.length, timeparts[TP_HOUR]);
break;
case 'I': // 12-hour
{
// 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
auto hour = timeparts[TP_HOUR];
if (hour == 0) hour = 12;
if (hour > 12) hour -= 12;
ptr = int2str(ptr, item.length, hour);
break;
}
case 'M': // minute
ptr = int2str(ptr, item.length, timeparts[TP_MINUTE]);
break;
case 'S': // second
ptr = int2str(ptr, item.length, timeparts[TP_SECOND]);
break;
case 'f': // sub-second
{
char subsecond_digits[] = "000000000"; // 9 max digits
const int digits = [units = units] {
if (units == timestamp_units::ms) return 3;
if (units == timestamp_units::us) return 6;
if (units == timestamp_units::ns) return 9;
return 0;
}();
int2str(subsecond_digits, digits, timeparts[TP_SUBSECOND]);
ptr = copy_and_increment(ptr, subsecond_digits, item.length);
break;
}
case 'p': // am or pm
// 0 = 12am, 12 = 12pm
if (timeparts[TP_HOUR] < 12)
memcpy(ptr, "AM", 2);
else
memcpy(ptr, "PM", 2);
ptr += 2;
break;
case 'z': // timezone
memcpy(ptr, "+0000", 5); // always UTC
ptr += 5;
break;
case 'Z':
memcpy(ptr, "UTC", 3);
ptr += 3;
break;
default: // ignore everything else
break;
}
}
return ptr;
}
__device__ void operator()(size_type idx)
{
if (d_timestamps.is_null(idx)) return;
auto timestamp = d_timestamps.element<T>(idx);
int32_t timeparts[TP_ARRAYSIZE] = {0};
dissect_timestamp(timestamp.time_since_epoch().count(), timeparts);
// convert to characters
format_from_parts(timeparts, d_chars + d_offsets[idx]);
}
};
//
struct dispatch_from_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_timestamps,
format_item const* d_format_items,
size_type items_count,
timestamp_units units,
const int32_t* d_offsets,
char* d_chars,
rmm::cuda_stream_view stream) const
{
datetime_formatter<T> pfn{d_timestamps, d_format_items, items_count, units, d_offsets, d_chars};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
d_timestamps.size(),
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
format_item const*,
size_type,
timestamp_units,
const int32_t*,
char* d_chars,
rmm::cuda_stream_view stream) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = timestamps.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units =
cudf::type_dispatcher(timestamps.type(), dispatch_timestamp_to_units_fn());
format_compiler compiler(format.c_str(), stream);
auto d_format_items = compiler.format_items();
auto column = column_device_view::create(timestamps, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = cudf::detail::copy_bitmask(timestamps, stream, mr);
// Each string will be the same number of bytes which can be determined
// directly from the format string.
auto d_str_bytes = compiler.template_bytes(); // size in bytes of each string
// build offsets column
auto offsets_transformer_itr =
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
[d_column, d_str_bytes] __device__(size_type idx) {
return (d_column.is_null(idx) ? 0 : d_str_bytes);
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.template data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, timestamps.null_count(), bytes, stream, mr);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.template data<char>();
// fill in chars column with timestamps
// dispatcher is called to handle the different timestamp types
cudf::type_dispatcher(timestamps.type(),
dispatch_from_timestamps_fn(),
d_column,
d_format_items,
compiler.items_count(),
units,
d_new_offsets,
d_chars,
stream);
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
timestamps.null_count(),
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_timestamps(timestamps, format, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| 034b147d85431b5af2a3c2a2624b8c74968b0900.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <strings/utilities.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/logical.h>
#include <map>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Units for timestamp conversion.
* These are defined since there are more than what cudf supports.
*/
enum class timestamp_units {
years, ///< precision is years
months, ///< precision is months
days, ///< precision is days
hours, ///< precision is hours
minutes, ///< precision is minutes
seconds, ///< precision is seconds
ms, ///< precision is milliseconds
us, ///< precision is microseconds
ns ///< precision is nanoseconds
};
// used to index values in a timeparts array
enum timestamp_parse_component {
TP_YEAR = 0,
TP_MONTH = 1,
TP_DAY = 2,
TP_DAY_OF_YEAR = 3,
TP_HOUR = 4,
TP_MINUTE = 5,
TP_SECOND = 6,
TP_SUBSECOND = 7,
TP_TZ_MINUTES = 8,
TP_ARRAYSIZE = 9
};
enum class format_char_type : int8_t {
literal, // literal char type passed through
specifier // timestamp format specifier
};
/**
* @brief Represents a format specifier or literal from a timestamp format string.
*
* Created by the format_compiler when parsing a format string.
*/
struct alignas(4) format_item {
format_char_type item_type; // specifier or literal indicator
char value; // specifier or literal value
int8_t length; // item length in bytes
static format_item new_specifier(char format_char, int8_t length)
{
return format_item{format_char_type::specifier, format_char, length};
}
static format_item new_delimiter(char literal)
{
return format_item{format_char_type::literal, literal, 1};
}
};
/**
* @brief The format_compiler parses a timestamp format string into a vector of
* format_items.
*
* The vector of format_items are used when parsing a string into timestamp
* components and when formatting a string from timestamp components.
*/
struct format_compiler {
std::string format;
std::string template_string;
rmm::device_uvector<format_item> d_items;
std::map<char, int8_t> specifier_lengths = {{'Y', 4},
{'y', 2},
{'m', 2},
{'d', 2},
{'H', 2},
{'I', 2},
{'M', 2},
{'S', 2},
{'f', 6},
{'z', 5},
{'Z', 3},
{'p', 2},
{'j', 3}};
format_compiler(const char* fmt, rmm::cuda_stream_view stream) : format(fmt), d_items(0, stream)
{
std::vector<format_item> items;
const char* str = format.c_str();
auto length = format.length();
while (length > 0) {
char ch = *str++;
length--;
if (ch != '%') {
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");
ch = *str++;
length--;
if (ch == '%') // escaped % char
{
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
if (ch >= '0' && ch <= '9') {
CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
specifier_lengths[*str] = static_cast<int8_t>(ch - '0');
ch = *str++;
length--;
}
CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(),
"invalid format specifier: " + std::string(1, ch));
int8_t spec_length = specifier_lengths[ch];
items.push_back(format_item::new_specifier(ch, spec_length));
template_string.append((size_t)spec_length, ch);
}
// create program in device memory
d_items.resize(items.size(), stream);
CUDA_TRY(cudaMemcpyAsync(d_items.data(),
items.data(),
items.size() * sizeof(items[0]),
cudaMemcpyHostToDevice,
stream.value()));
}
format_item const* format_items() { return d_items.data(); }
size_type template_bytes() const { return static_cast<size_type>(template_string.size()); }
size_type items_count() const { return static_cast<size_type>(d_items.size()); }
int8_t subsecond_precision() const { return specifier_lengths.at('f'); }
};
// this parses date/time characters into a timestamp integer
template <typename T> // timestamp type
struct parse_datetime {
column_device_view const d_strings;
format_item const* d_format_items;
size_type items_count;
timestamp_units units;
int8_t subsecond_precision;
/**
* @brief Return power of ten value given an exponent.
*
* @return `1x10^exponent` for `0 <= exponent <= 9`
*/
__device__ constexpr int64_t power_of_ten(int32_t exponent)
{
constexpr int64_t powers_of_ten[] = {
1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
return powers_of_ten[exponent];
}
//
__device__ int32_t str2int(const char* str, size_type bytes)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value;
}
// Walk the format_items to read the datetime string.
// Returns 0 if all ok.
__device__ int parse_into_parts(string_view const& d_string, int32_t* timeparts)
{
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.value != 'f')
item.length = static_cast<int8_t>(std::min(static_cast<size_type>(item.length), length));
if (item.item_type == format_char_type::literal) {
// static character we'll just skip;
// consume item.length bytes from string
ptr += item.length;
length -= item.length;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': timeparts[TP_YEAR] = str2int(ptr, item.length); break;
case 'y': {
auto const year = str2int(ptr, item.length);
timeparts[TP_YEAR] = year + (year < 69 ? 2000 : 1900);
break;
}
case 'm': timeparts[TP_MONTH] = str2int(ptr, item.length); break;
case 'd': timeparts[TP_DAY] = str2int(ptr, item.length); break;
case 'j': timeparts[TP_DAY_OF_YEAR] = str2int(ptr, item.length); break;
case 'H':
case 'I': timeparts[TP_HOUR] = str2int(ptr, item.length); break;
case 'M': timeparts[TP_MINUTE] = str2int(ptr, item.length); break;
case 'S': timeparts[TP_SECOND] = str2int(ptr, item.length); break;
case 'f': {
int32_t const read_size =
std::min(static_cast<int32_t>(item.length), static_cast<int32_t>(length));
int64_t const fraction = str2int(ptr, read_size) * power_of_ten(item.length - read_size);
timeparts[TP_SUBSECOND] = static_cast<int32_t>(fraction);
break;
}
case 'p': {
string_view am_pm(ptr, 2);
auto hour = timeparts[TP_HOUR];
if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
if (hour == 12) hour = 0;
} else if (hour < 12)
hour += 12;
timeparts[TP_HOUR] = hour;
break;
}
case 'z': {
int sign = *ptr == '-' ? 1 : -1; // revert timezone back to UTC
int hh = str2int(ptr + 1, 2);
int mm = str2int(ptr + 3, 2);
// ignoring the rest for now
// item.length has how many chars we should read
timeparts[TP_TZ_MINUTES] = sign * ((hh * 60) + mm);
break;
}
case 'Z': break; // skip
default: return 3;
}
ptr += item.length;
length -= item.length;
}
return 0;
}
__device__ int64_t timestamp_from_parts(int32_t const* timeparts, timestamp_units units)
{
auto year = timeparts[TP_YEAR];
if (units == timestamp_units::years) return year - 1970;
auto month = timeparts[TP_MONTH];
if (units == timestamp_units::months)
return ((year - 1970) * 12) + (month - 1); // months are 1-12, need to 0-base it here
auto day = timeparts[TP_DAY];
auto ymd = // convenient chrono class handles the leap year calculations for us
cuda::std::chrono::year_month_day(cuda::std::chrono::year{year},
cuda::std::chrono::month{static_cast<uint32_t>(month)},
cuda::std::chrono::day{static_cast<uint32_t>(day)});
int32_t days = cuda::std::chrono::sys_days(ymd).time_since_epoch().count();
if (units == timestamp_units::days) return days;
auto tzadjust = timeparts[TP_TZ_MINUTES]; // in minutes
auto hour = timeparts[TP_HOUR];
if (units == timestamp_units::hours) return (days * 24L) + hour + (tzadjust / 60);
auto minute = timeparts[TP_MINUTE];
if (units == timestamp_units::minutes)
return static_cast<int64_t>(days * 24L * 60L) + (hour * 60L) + minute + tzadjust;
auto second = timeparts[TP_SECOND];
int64_t timestamp =
(days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust * 60);
if (units == timestamp_units::seconds) return timestamp;
int64_t subsecond =
timeparts[TP_SUBSECOND] * power_of_ten(9 - subsecond_precision); // normalize to nanoseconds
if (units == timestamp_units::ms) {
timestamp *= 1000L;
subsecond = subsecond / 1000000L;
} else if (units == timestamp_units::us) {
timestamp *= 1000000L;
subsecond = subsecond / 1000L;
} else if (units == timestamp_units::ns)
timestamp *= 1000000000L;
timestamp += subsecond;
return timestamp;
}
__device__ T operator()(size_type idx)
{
T epoch_time{typename T::duration{0}};
if (d_strings.is_null(idx)) return epoch_time;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return epoch_time;
//
int32_t timeparts[TP_ARRAYSIZE] = {1970, 1, 1}; // month and day are 1-based
if (parse_into_parts(d_str, timeparts)) return epoch_time; // unexpected parse case
//
return T{T::duration(timestamp_from_parts(timeparts, units))};
}
};
// convert cudf type to timestamp units
struct dispatch_timestamp_to_units_fn {
template <typename T>
timestamp_units operator()()
{
CUDF_FAIL("Invalid type for timestamp conversion.");
}
};
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_D>()
{
return timestamp_units::days;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_s>()
{
return timestamp_units::seconds;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ms>()
{
return timestamp_units::ms;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_us>()
{
return timestamp_units::us;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ns>()
{
return timestamp_units::ns;
}
// dispatch operator to map timestamp to native fixed-width-type
struct dispatch_to_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_strings,
std::string const& format,
timestamp_units units,
mutable_column_view& results_view,
rmm::cuda_stream_view stream) const
{
format_compiler compiler(format.c_str(), stream);
auto d_items = compiler.format_items();
auto d_results = results_view.data<T>();
parse_datetime<T> pfn{
d_strings, d_items, compiler.items_count(), units, compiler.subsecond_precision()};
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(results_view.size()),
d_results,
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
std::string const&,
timestamp_units,
mutable_column_view&,
rmm::cuda_stream_view) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_timestamp_column(timestamp_type, 0);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units = cudf::type_dispatcher(timestamp_type, dispatch_timestamp_to_units_fn());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto results = make_timestamp_column(timestamp_type,
strings_count,
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
cudf::type_dispatcher(
timestamp_type, dispatch_to_timestamps_fn(), d_column, format, units, results_view, stream);
results->set_null_count(strings.null_count());
return results;
}
/**
* @brief Functor checks the strings against the given format items.
*
* This does no data conversion.
*/
struct check_datetime_format {
column_device_view const d_strings;
format_item const* d_format_items;
size_type items_count;
/**
* @brief Check the specified characters are between ['0','9'].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @return true if all digits are 0-9
*/
__device__ bool check_digits(const char* str, size_type bytes)
{
return thrust::all_of(thrust::seq, str, str + bytes, [] __device__(char chr) {
return (chr >= '0' && chr <= '9');
});
}
/**
* @brief Specialized function to return the value and check for non-decimal characters.
*
* If non-decimal characters are found within `str` and `str + bytes` then
* the returned result is `thrust::nullopt` (_does not contain a value_).
* Otherwise, the parsed integer result is returned.
*
* @param str Beginning of characters to read/check.
* @param bytes Number of bytes in str to read/check.
* @return Integer value if characters are valid.
*/
__device__ thrust::optional<int32_t> str2int(const char* str, size_type bytes)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') return thrust::nullopt;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value;
}
/**
* @brief Check the specified characters are between ['0','9']
* and the resulting integer is within [`min_value`, `max_value`].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @param min_value Inclusive minimum value
* @param max_value Inclusive maximum value
* @return true if parsed value is between `min_value` and `max_value`.
*/
__device__ bool check_value(const char* str, size_type bytes, int min_value, int max_value)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') return false;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value >= min_value && value <= max_value;
}
/**
* @brief Check the string matches the format.
*
* Walk the `format_items` as we read the string characters
* checking the characters are valid for each format specifier.
* The checking here is a little more strict than the actual
* parser used for conversion.
*/
__device__ bool check_string(string_view const& d_string, int32_t* dateparts)
{
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
// eliminate static character values first
if (item.item_type == format_char_type::literal) {
// check static character matches
if (*ptr != item.value) return false;
ptr += item.length;
length -= item.length;
continue;
}
// allow for specifiers to be truncated
if (item.value != 'f')
item.length = static_cast<int8_t>(std::min(static_cast<size_type>(item.length), length));
// special logic for each specifier
// reference: https://man7.org/linux/man-pages/man3/strptime.3.html
bool result = false;
switch (item.value) {
case 'Y': {
if (auto value = str2int(ptr, item.length)) {
result = true;
dateparts[TP_YEAR] = value.value();
}
break;
}
case 'y': {
if (auto value = str2int(ptr, item.length)) {
result = true;
auto const year = value.value();
dateparts[TP_YEAR] = year + (year < 69 ? 2000 : 1900);
}
break;
}
case 'm': {
if (auto value = str2int(ptr, item.length)) {
result = true;
dateparts[TP_MONTH] = value.value();
}
break;
}
case 'd': {
if (auto value = str2int(ptr, item.length)) {
result = true;
dateparts[TP_DAY] = value.value();
}
break;
}
case 'j': result = check_value(ptr, item.length, 1, 366); break;
case 'H': result = check_value(ptr, item.length, 0, 23); break;
case 'I': result = check_value(ptr, item.length, 1, 12); break;
case 'M': result = check_value(ptr, item.length, 0, 59); break;
case 'S': result = check_value(ptr, item.length, 0, 60); break;
case 'f': {
result = check_digits(ptr, std::min(static_cast<int32_t>(item.length), length));
break;
}
case 'p': {
if (item.length == 2) {
string_view am_pm(ptr, 2);
result = (am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0) ||
(am_pm.compare("PM", 2) == 0) || (am_pm.compare("pm", 2) == 0);
}
break;
}
case 'z': { // timezone offset
if (item.length == 5) {
result = (*ptr == '-' || *ptr == '+') && // sign
check_value(ptr + 1, 2, 0, 23) && // hour
check_value(ptr + 3, 2, 0, 59); // minute
}
break;
}
case 'Z': result = true; // skip
default: break;
}
if (!result) return false;
ptr += item.length;
length -= item.length;
}
return true;
}
__device__ bool operator()(size_type idx)
{
if (d_strings.is_null(idx)) return false;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return false;
int32_t dateparts[] = {1970, 1, 1}; // year, month, day
if (!check_string(d_str, dateparts)) return false;
auto year = dateparts[TP_YEAR];
auto month = static_cast<uint32_t>(dateparts[TP_MONTH]);
auto day = static_cast<uint32_t>(dateparts[TP_DAY]);
return cuda::std::chrono::year_month_day(cuda::std::chrono::year{year},
cuda::std::chrono::month{month},
cuda::std::chrono::day{day})
.ok();
}
};
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& strings,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::BOOL8});
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto results = make_numeric_column(data_type{type_id::BOOL8},
strings_count,
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
format_compiler compiler(format.c_str(), stream);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
check_datetime_format{d_strings, compiler.format_items(), compiler.items_count()});
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_timestamps(strings, timestamp_type, format, rmm::cuda_stream_default, mr);
}
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& strings,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_timestamp(strings, format, rmm::cuda_stream_default, mr);
}
namespace detail {
namespace {
// converts a timestamp into date-time string
template <typename T>
struct datetime_formatter {
const column_device_view d_timestamps;
const format_item* d_format_items;
size_type items_count;
timestamp_units units;
const int32_t* d_offsets;
char* d_chars;
__device__ cudf::timestamp_D::duration convert_to_days(int64_t timestamp, timestamp_units units)
{
using namespace cuda::std::chrono;
using minutes = duration<timestamp_s::rep, minutes::period>;
using hours = duration<timestamp_s::rep, hours::period>;
switch (units) {
case timestamp_units::minutes: return floor<days>(minutes(timestamp));
case timestamp_units::seconds: return floor<days>(cudf::timestamp_s::duration(timestamp));
case timestamp_units::hours: return floor<days>(hours(timestamp));
case timestamp_units::ms: return floor<days>(cudf::timestamp_ms::duration(timestamp));
case timestamp_units::us: return floor<days>(cudf::timestamp_us::duration(timestamp));
case timestamp_units::ns: return floor<days>(cudf::timestamp_ns::duration(timestamp));
default: return cudf::timestamp_D::duration(timestamp);
}
}
// divide timestamp integer into time components (year, month, day, etc)
// TODO call the cuda::std::chrono methods here instead when they are ready
__device__ void dissect_timestamp(int64_t timestamp, int32_t* timeparts)
{
if (units == timestamp_units::years) {
timeparts[TP_YEAR] = static_cast<int32_t>(timestamp) + 1970;
timeparts[TP_MONTH] = 1;
timeparts[TP_DAY] = 1;
return;
}
// Specialized modulo expression that handles negative values.
// Examples:
// modulo(1,60) 1
// modulo(-1,60) 59
auto modulo_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>(((time % base) + base) % base);
};
// This function handles converting units by dividing and adjusting for negative values.
// Examples:
// scale(-61,60) -2
// scale(-60,60) -1
// scale(-59,60) -1
// scale( 59,60) 0
// scale( 60,60) 1
// scale( 61,60) 1
auto scale_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>((time - ((time < 0) * (base - 1L))) / base);
};
if (units == timestamp_units::months) {
int32_t month = modulo_time(timestamp, 12);
int32_t year = scale_time(timestamp, 12) + 1970;
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month + 1; // months start at 1 and not 0
timeparts[TP_DAY] = 1;
return;
}
// first, convert to days so we can handle months, years, day of the year.
auto const days = convert_to_days(timestamp, units);
auto const ymd = cuda::std::chrono::year_month_day(cuda::std::chrono::sys_days(days));
auto const year = static_cast<int32_t>(ymd.year());
auto const month = static_cast<unsigned>(ymd.month());
auto const day = static_cast<unsigned>(ymd.day());
int32_t const monthDayOffset[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
timeparts[TP_DAY_OF_YEAR] =
day + monthDayOffset[month - 1] + (month > 2 and ymd.year().is_leap());
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month;
timeparts[TP_DAY] = day;
if (units == timestamp_units::days) return;
// done with date, now work on time
if (units == timestamp_units::hours) {
timeparts[TP_HOUR] = modulo_time(timestamp, 24);
return;
}
if (units == timestamp_units::minutes) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 60), 24);
timeparts[TP_MINUTE] = modulo_time(timestamp, 60);
return;
}
if (units == timestamp_units::seconds) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
return;
}
// common utility for setting time components from a subsecond unit value
auto subsecond_fn = [&](int64_t subsecond_base) {
auto subsecond = modulo_time(timestamp, subsecond_base);
timestamp = timestamp / subsecond_base - ((timestamp < 0) and (subsecond != 0));
timeparts[TP_SUBSECOND] = subsecond;
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
};
if (units == timestamp_units::ms)
subsecond_fn(1000);
else if (units == timestamp_units::us)
subsecond_fn(1000000);
else
subsecond_fn(1000000000);
}
// utility to create 0-padded integers (up to 9 chars)
__device__ char* int2str(char* str, int bytes, int val)
{
char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
char* ptr = tmpl;
while (val > 0) {
int digit = val % 10;
*ptr++ = '0' + digit;
val = val / 10;
}
ptr = tmpl + bytes - 1;
while (bytes-- > 0) *str++ = *ptr--;
return str;
}
__device__ char* format_from_parts(int32_t const* timeparts, char* ptr)
{
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.item_type == format_char_type::literal) {
*ptr++ = item.value;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': // 4-digit year
ptr = int2str(ptr, item.length, timeparts[TP_YEAR]);
break;
case 'y': // 2-digit year
{
auto year = timeparts[TP_YEAR];
// remove hundredths digits and above
ptr = int2str(ptr, item.length, year - ((year / 100) * 100));
break;
}
case 'm': // month
ptr = int2str(ptr, item.length, timeparts[TP_MONTH]);
break;
case 'd': // day of month
ptr = int2str(ptr, item.length, timeparts[TP_DAY]);
break;
case 'j': // day of year
ptr = int2str(ptr, item.length, timeparts[TP_DAY_OF_YEAR]);
break;
case 'H': // 24-hour
ptr = int2str(ptr, item.length, timeparts[TP_HOUR]);
break;
case 'I': // 12-hour
{
// 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
auto hour = timeparts[TP_HOUR];
if (hour == 0) hour = 12;
if (hour > 12) hour -= 12;
ptr = int2str(ptr, item.length, hour);
break;
}
case 'M': // minute
ptr = int2str(ptr, item.length, timeparts[TP_MINUTE]);
break;
case 'S': // second
ptr = int2str(ptr, item.length, timeparts[TP_SECOND]);
break;
case 'f': // sub-second
{
char subsecond_digits[] = "000000000"; // 9 max digits
const int digits = [units = units] {
if (units == timestamp_units::ms) return 3;
if (units == timestamp_units::us) return 6;
if (units == timestamp_units::ns) return 9;
return 0;
}();
int2str(subsecond_digits, digits, timeparts[TP_SUBSECOND]);
ptr = copy_and_increment(ptr, subsecond_digits, item.length);
break;
}
case 'p': // am or pm
// 0 = 12am, 12 = 12pm
if (timeparts[TP_HOUR] < 12)
memcpy(ptr, "AM", 2);
else
memcpy(ptr, "PM", 2);
ptr += 2;
break;
case 'z': // timezone
memcpy(ptr, "+0000", 5); // always UTC
ptr += 5;
break;
case 'Z':
memcpy(ptr, "UTC", 3);
ptr += 3;
break;
default: // ignore everything else
break;
}
}
return ptr;
}
__device__ void operator()(size_type idx)
{
if (d_timestamps.is_null(idx)) return;
auto timestamp = d_timestamps.element<T>(idx);
int32_t timeparts[TP_ARRAYSIZE] = {0};
dissect_timestamp(timestamp.time_since_epoch().count(), timeparts);
// convert to characters
format_from_parts(timeparts, d_chars + d_offsets[idx]);
}
};
//
struct dispatch_from_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_timestamps,
format_item const* d_format_items,
size_type items_count,
timestamp_units units,
const int32_t* d_offsets,
char* d_chars,
rmm::cuda_stream_view stream) const
{
datetime_formatter<T> pfn{d_timestamps, d_format_items, items_count, units, d_offsets, d_chars};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
d_timestamps.size(),
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
format_item const*,
size_type,
timestamp_units,
const int32_t*,
char* d_chars,
rmm::cuda_stream_view stream) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = timestamps.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units =
cudf::type_dispatcher(timestamps.type(), dispatch_timestamp_to_units_fn());
format_compiler compiler(format.c_str(), stream);
auto d_format_items = compiler.format_items();
auto column = column_device_view::create(timestamps, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = cudf::detail::copy_bitmask(timestamps, stream, mr);
// Each string will be the same number of bytes which can be determined
// directly from the format string.
auto d_str_bytes = compiler.template_bytes(); // size in bytes of each string
// build offsets column
auto offsets_transformer_itr =
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
[d_column, d_str_bytes] __device__(size_type idx) {
return (d_column.is_null(idx) ? 0 : d_str_bytes);
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.template data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, timestamps.null_count(), bytes, stream, mr);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.template data<char>();
// fill in chars column with timestamps
// dispatcher is called to handle the different timestamp types
cudf::type_dispatcher(timestamps.type(),
dispatch_from_timestamps_fn(),
d_column,
d_format_items,
compiler.items_count(),
units,
d_new_offsets,
d_chars,
stream);
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
timestamps.null_count(),
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_timestamps(timestamps, format, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
e8a3e465ced7f63e615569e0140f2498c2bb5ca9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include "gpu_hashtable.hpp"
__global__ void set_zero (key_value_pair *bucket, int bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bucket_size <= idx) {
return;
}
bucket[idx].key = 0;
bucket[idx].value = 0;
}
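/* Illustrative launch of set_zero (marks every slot of a freshly allocated bucket as empty):
 * int blocks_number = bucket_size / THREADS_NUMBER + 1;
 * hipLaunchKernelGGL(set_zero, dim3(blocks_number), dim3(THREADS_NUMBER), 0, 0, bucket, bucket_size);
 */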
__global__ void get_keys (int *new_keys, int *new_values, int numKeys, key_value_pair *bucket_1, key_value_pair *bucket_2, int bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, hash;
	// Only threads with an index below the number of requested pairs do any work
if (numKeys <= idx) {
return;
}
	// Try the slot computed by the hash function in each of the two buckets
hash = hash1 (new_keys[idx], bucket_size);
if (new_keys[idx] == bucket_1[hash].key) {
new_values[idx] = bucket_1[hash].value;
return;
}
if (new_keys[idx] == bucket_2[hash].key) {
new_values[idx] = bucket_2[hash].value;
return;
}
	// If the hashed slot did not match, fall back to a linear scan of both buckets
for (i = hash + 1; i < bucket_size; i++) {
if (new_keys[idx] == bucket_1[i].key) {
new_values[idx] = bucket_1[i].value;
return;
}
if (new_keys[idx] == bucket_2[i].key) {
new_values[idx] = bucket_2[i].value;
return;
}
}
for (i = 0; i < hash; i++) {
if (new_keys[idx] == bucket_1[i].key) {
new_values[idx] = bucket_1[i].value;
return;
}
if (new_keys[idx] == bucket_2[i].key) {
new_values[idx] = bucket_2[i].value;
return;
}
}
}
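/* Inserts numKeys (key, value) pairs in parallel. Each thread claims a slot with atomicCAS,
 * which only succeeds on an empty slot or on a slot that already holds the same key, and then
 * publishes the value with atomicExch, so inserting a duplicate key updates its stored value.
 */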
__global__ void insert_keys (int *new_keys, int *new_values, int numKeys, key_value_pair *bucket_1, key_value_pair *bucket_2, int bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, hash, old;
	// Only threads with an index below the number of pairs to insert do any work
if (numKeys <= idx) {
return;
}
	// Try to insert at the slot computed by the hash function, in either bucket
hash = hash1 (new_keys[idx], bucket_size);
old = atomicCAS (&bucket_1[hash].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_1[hash].value, new_values[idx]);
return;
}
old = atomicCAS (&bucket_2[hash].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_2[hash].value, new_values[idx]);
return;
}
	// If the hashed slot is taken, linearly probe both buckets for any free (or matching) slot
for (i = hash + 1; i < bucket_size; i++) {
old = atomicCAS (&bucket_1[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_1[i].value, new_values[idx]);
return;
}
old = atomicCAS (&bucket_2[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_2[i].value, new_values[idx]);
return;
}
}
for (i = 0; i < hash; i++) {
old = atomicCAS (&bucket_1[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_1[i].value, new_values[idx]);
return;
}
old = atomicCAS (&bucket_2[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_2[i].value, new_values[idx]);
return;
}
}
	// Both buckets are completely full, so this pair cannot be inserted;
	// reshape() is expected to provide enough room before a batch is inserted.
}
/* MOVE DATA FROM OLD BUCKET TO NEW BIGGER BUCKET
*/
__global__ void move_bucket (key_value_pair *old_bucket, key_value_pair *new_bucket1, key_value_pair *new_bucket2, int old_bucket_size, int new_bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, old, hash;
	// One thread per slot of the old bucket; empty slots are skipped
if ((old_bucket_size <= idx) || (old_bucket[idx].key == KEY_INVALID)) {
return;
}
	// Try to insert at the slot computed by the hash function, in either new bucket
hash = hash1 (old_bucket[idx].key, new_bucket_size);
old = atomicCAS (&new_bucket1[hash].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket1[hash].value, old_bucket[idx].value);
return;
}
old = atomicCAS (&new_bucket2[hash].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket2[hash].value, old_bucket[idx].value);
return;
}
	// If the hashed slot is taken, probe linearly and insert into the first free slot
for (i = hash + 1; i < new_bucket_size; i++) {
old = atomicCAS (&new_bucket1[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket1[i].value, old_bucket[idx].value);
return;
}
old = atomicCAS (&new_bucket2[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket2[i].value, old_bucket[idx].value);
return;
}
}
for (i = 0; i < hash; i++) {
old = atomicCAS (&new_bucket1[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket1[i].value, old_bucket[idx].value);
return;
}
old = atomicCAS (&new_bucket2[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket2[i].value, old_bucket[idx].value);
return;
}
}
}
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
int rc;
	// There are 2 buckets, so each bucket gets (size / 2 + 1) slots
	total_size = (size / 2 + 1) * 2;
	free_size = total_size;
	// Initialize the pointers to null, as shown in the lab
	bucket_1 = 0;
	bucket_2 = 0;
	// Allocate device memory for each bucket
	rc = hipMalloc (&bucket_1, (total_size / 2) * sizeof (key_value_pair));
	DIE (rc != hipSuccess, "Error in init while allocating bucket_1!");
	rc = hipMalloc (&bucket_2, (total_size / 2) * sizeof (key_value_pair));
	DIE (rc != hipSuccess, "Error in init while allocating bucket_2!");
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
hipFree(bucket_1);
hipFree(bucket_2);
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
int rc;
int blocks_number;
	// Check that the requested size is valid
	if (numBucketsReshape <= total_size) {
		return;
	}
	// The new buckets that will replace the old ones
key_value_pair *bucket_1_new;
key_value_pair *bucket_2_new;
bucket_1_new = 0;
bucket_2_new = 0;
	// Allocate memory for the two new buckets
	rc = hipMalloc (&bucket_1_new, (numBucketsReshape / 2 + 1) * sizeof (key_value_pair));
	DIE (rc != hipSuccess, "Error in reshape while allocating bucket_1_new!");
	rc = hipMalloc (&bucket_2_new, (numBucketsReshape / 2 + 1) * sizeof (key_value_pair));
	DIE (rc != hipSuccess, "Error in reshape while allocating bucket_2_new!");
	// Compute how many blocks will run
	blocks_number = (total_size / 2) / THREADS_NUMBER + 1;
	// Move the data from the old buckets into the new ones
hipLaunchKernelGGL(( move_bucket) , dim3(blocks_number), dim3(THREADS_NUMBER), 0, 0, bucket_1, bucket_1_new, bucket_2_new, (total_size / 2), (numBucketsReshape / 2 + 1));
hipDeviceSynchronize();
hipLaunchKernelGGL(( move_bucket) , dim3(blocks_number), dim3(THREADS_NUMBER), 0, 0, bucket_2, bucket_1_new, bucket_2_new, (total_size / 2), (numBucketsReshape / 2 + 1));
hipDeviceSynchronize();
	// Update the size metrics
free_size += (((numBucketsReshape / 2 + 1) * 2) - total_size);
total_size = (numBucketsReshape / 2 + 1) * 2;
	// Replace the old buckets with the new ones
hipFree (bucket_1);
hipFree (bucket_2);
bucket_1 = bucket_1_new;
bucket_2 = bucket_2_new;
}
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
int rc;
int blocks_number;
	// Move the key-value pairs into device memory
int *new_keys;
int *new_values;
new_keys = 0;
new_values = 0;
	rc = hipMalloc (&new_keys, numKeys * sizeof (int));
	DIE (rc != hipSuccess, "Error in insertBatch while allocating new_keys!");
	hipMemset (new_keys, 0, numKeys * sizeof (int));
	rc = hipMalloc (&new_values, numKeys * sizeof (int));
	DIE (rc != hipSuccess, "Error in insertBatch while allocating new_values!");
	hipMemset (new_values, 0, numKeys * sizeof (int));
	hipMemcpy (new_keys, keys, numKeys * sizeof (int), hipMemcpyHostToDevice);
	hipMemcpy (new_values, values, numKeys * sizeof (int), hipMemcpyHostToDevice);
	// If the new keys would push the hashmap above 95% occupancy, reshape so that the
	// load factor ends up around 81% after the new keys are added
if ((total_size - free_size + numKeys) > ((float)((float)(95.00f / 100.00f) * (float)total_size))) {
reshape ((total_size - free_size + numKeys) * 100 / 81);
}
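	// Illustrative example (hypothetical sizes): with total_size = 200, free_size = 60
	// and numKeys = 60, the occupancy after insertion would be 200 > 0.95 * 200 = 190,
	// so reshape(200 * 100 / 81), i.e. about 246 slots, is requested before inserting.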
	// Compute how many blocks will run
blocks_number = numKeys / THREADS_NUMBER + 1;
hipLaunchKernelGGL(( insert_keys) , dim3(blocks_number), dim3(THREADS_NUMBER), 0, 0, new_keys, new_values, numKeys, bucket_1, bucket_2, (total_size / 2));
	// Wait for all blocks to finish
hipDeviceSynchronize();
free_size -= numKeys;
	// Free the key-value pairs from device memory
hipFree (new_keys);
hipFree (new_values);
return true;
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
int rc;
int blocks_number;
	// Allocate memory for the keys and values in VRAM
int *new_keys;
int *new_values;
int *results;
new_keys = 0;
new_values = 0;
	rc = hipMalloc (&new_keys, numKeys * sizeof (int));
	DIE (rc != hipSuccess, "Error in getBatch while allocating new_keys!");
	hipMemset (new_keys, 0, numKeys * sizeof (int));
	rc = hipMalloc (&new_values, numKeys * sizeof (int));
	DIE (rc != hipSuccess, "Error in getBatch while allocating new_values!");
	hipMemset (new_values, 0, numKeys * sizeof (int));
	// Copy the keys into VRAM
	hipMemcpy (new_keys, keys, numKeys * sizeof (int), hipMemcpyHostToDevice);
	// Compute how many blocks will run
blocks_number = numKeys / THREADS_NUMBER + 1;
hipLaunchKernelGGL(( get_keys) , dim3(blocks_number), dim3(THREADS_NUMBER), 0, 0, new_keys, new_values, numKeys, bucket_1, bucket_2, (total_size / 2));
	// Wait for all blocks to finish
hipDeviceSynchronize();
	// Copy the results back into host memory
results = (int*) malloc (numKeys * sizeof (int));
hipMemcpy (results, new_values, numKeys * sizeof (int), hipMemcpyDeviceToHost);
	// Free the key-value pairs from device memory
hipFree (new_keys);
hipFree (new_values);
return results;
}
/* GET LOAD FACTOR
 * number of stored elements / total number of hash table slots
*/
float GpuHashTable::loadFactor() {
if (total_size == 0) {
return 0.19f;
}
return (float)((float)((float)total_size - (float)free_size) / (float)total_size);
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#include "test_map.cpp"
| e8a3e465ced7f63e615569e0140f2498c2bb5ca9.cu | #include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include "gpu_hashtable.hpp"
__global__ void set_zero (key_value_pair *bucket, int bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bucket_size <= idx) {
return;
}
bucket[idx].key = 0;
bucket[idx].value = 0;
}
__global__ void get_keys (int *new_keys, int *new_values, int numKeys, key_value_pair *bucket_1, key_value_pair *bucket_2, int bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, hash;
	// Run only as many threads as there are key-value pairs
	if (numKeys <= idx) {
		return;
	}
	// Try to read the value from the slot given by the hash function
hash = hash1 (new_keys[idx], bucket_size);
if (new_keys[idx] == bucket_1[hash].key) {
new_values[idx] = bucket_1[hash].value;
return;
}
if (new_keys[idx] == bucket_2[hash].key) {
new_values[idx] = bucket_2[hash].value;
return;
}
	// If the key is not in its hashed slot, probe the rest of the table linearly
for (i = hash + 1; i < bucket_size; i++) {
if (new_keys[idx] == bucket_1[i].key) {
new_values[idx] = bucket_1[i].value;
return;
}
if (new_keys[idx] == bucket_2[i].key) {
new_values[idx] = bucket_2[i].value;
return;
}
}
for (i = 0; i < hash; i++) {
if (new_keys[idx] == bucket_1[i].key) {
new_values[idx] = bucket_1[i].value;
return;
}
if (new_keys[idx] == bucket_2[i].key) {
new_values[idx] = bucket_2[i].value;
return;
}
}
}
__global__ void insert_keys (int *new_keys, int *new_values, int numKeys, key_value_pair *bucket_1, key_value_pair *bucket_2, int bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, hash, old;
	// Run only as many threads as there are key-value pairs
	if (numKeys <= idx) {
		return;
	}
	// Try to insert at the slot given by the hash function
hash = hash1 (new_keys[idx], bucket_size);
old = atomicCAS (&bucket_1[hash].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_1[hash].value, new_values[idx]);
return;
}
old = atomicCAS (&bucket_2[hash].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_2[hash].value, new_values[idx]);
return;
}
	// If the hashed slot is taken by another key, probe linearly and insert into the first free slot
for (i = hash + 1; i < bucket_size; i++) {
old = atomicCAS (&bucket_1[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_1[i].value, new_values[idx]);
return;
}
old = atomicCAS (&bucket_2[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_2[i].value, new_values[idx]);
return;
}
}
for (i = 0; i < hash; i++) {
old = atomicCAS (&bucket_1[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_1[i].value, new_values[idx]);
return;
}
old = atomicCAS (&bucket_2[i].key, KEY_INVALID, new_keys[idx]);
if ((old == KEY_INVALID) || (old == new_keys[idx])) {
atomicExch (&bucket_2[i].value, new_values[idx]);
return;
}
}
bucket_1[idx].value = -19;
bucket_1[idx].key = -19;
}
/* MOVE DATA FROM OLD BUCKET TO NEW BIGGER BUCKET
*/
__global__ void move_bucket (key_value_pair *old_bucket, key_value_pair *new_bucket1, key_value_pair *new_bucket2, int old_bucket_size, int new_bucket_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, old, hash;
	// Run one thread per slot of the old bucket, and only process entries that hold a value
	if ((old_bucket_size <= idx) || (old_bucket[idx].key == KEY_INVALID)) {
		return;
	}
	// Try to insert at the slot given by the hash function
hash = hash1 (old_bucket[idx].key, new_bucket_size);
old = atomicCAS (&new_bucket1[hash].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket1[hash].value, old_bucket[idx].value);
return;
}
old = atomicCAS (&new_bucket2[hash].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket2[hash].value, old_bucket[idx].value);
return;
}
	// If the hashed slot is taken, probe linearly and insert into the first free slot
for (i = hash + 1; i < new_bucket_size; i++) {
old = atomicCAS (&new_bucket1[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket1[i].value, old_bucket[idx].value);
return;
}
old = atomicCAS (&new_bucket2[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket2[i].value, old_bucket[idx].value);
return;
}
}
for (i = 0; i < hash; i++) {
old = atomicCAS (&new_bucket1[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket1[i].value, old_bucket[idx].value);
return;
}
old = atomicCAS (&new_bucket2[i].key, KEY_INVALID, old_bucket[idx].key);
if (old == KEY_INVALID) {
atomicExch (&new_bucket2[i].value, old_bucket[idx].value);
return;
}
}
}
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
int rc;
	// There are 2 buckets, so each bucket gets (size / 2 + 1) slots
	total_size = (size / 2 + 1) * 2;
	free_size = total_size;
	// Initialize the pointers to null, as shown in the lab
	bucket_1 = 0;
	bucket_2 = 0;
	// Allocate device memory for each bucket
	rc = cudaMalloc (&bucket_1, (total_size / 2) * sizeof (key_value_pair));
	DIE (rc != cudaSuccess, "Error in init while allocating bucket_1!");
	rc = cudaMalloc (&bucket_2, (total_size / 2) * sizeof (key_value_pair));
	DIE (rc != cudaSuccess, "Error in init while allocating bucket_2!");
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
cudaFree(bucket_1);
cudaFree(bucket_2);
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
int rc;
int blocks_number;
	// Check that the requested size is valid
	if (numBucketsReshape <= total_size) {
		return;
	}
	// The new buckets that will replace the old ones
key_value_pair *bucket_1_new;
key_value_pair *bucket_2_new;
bucket_1_new = 0;
bucket_2_new = 0;
	// Allocate memory for the two new buckets
	rc = cudaMalloc (&bucket_1_new, (numBucketsReshape / 2 + 1) * sizeof (key_value_pair));
	DIE (rc != cudaSuccess, "Error in reshape while allocating bucket_1_new!");
	rc = cudaMalloc (&bucket_2_new, (numBucketsReshape / 2 + 1) * sizeof (key_value_pair));
	DIE (rc != cudaSuccess, "Error in reshape while allocating bucket_2_new!");
	// Compute how many blocks will run
	blocks_number = (total_size / 2) / THREADS_NUMBER + 1;
	// Move the data from the old buckets into the new ones
move_bucket <<<blocks_number, THREADS_NUMBER>>> (bucket_1, bucket_1_new, bucket_2_new, (total_size / 2), (numBucketsReshape / 2 + 1));
cudaDeviceSynchronize();
move_bucket <<<blocks_number, THREADS_NUMBER>>> (bucket_2, bucket_1_new, bucket_2_new, (total_size / 2), (numBucketsReshape / 2 + 1));
cudaDeviceSynchronize();
	// Update the size metrics
free_size += (((numBucketsReshape / 2 + 1) * 2) - total_size);
total_size = (numBucketsReshape / 2 + 1) * 2;
	// Replace the old buckets with the new ones
cudaFree (bucket_1);
cudaFree (bucket_2);
bucket_1 = bucket_1_new;
bucket_2 = bucket_2_new;
}
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
int rc;
int blocks_number;
	// Move the key-value pairs into device memory
int *new_keys;
int *new_values;
new_keys = 0;
new_values = 0;
	rc = cudaMalloc (&new_keys, numKeys * sizeof (int));
	DIE (rc != cudaSuccess, "Error in insertBatch while allocating new_keys!");
	cudaMemset (new_keys, 0, numKeys * sizeof (int));
	rc = cudaMalloc (&new_values, numKeys * sizeof (int));
	DIE (rc != cudaSuccess, "Error in insertBatch while allocating new_values!");
	cudaMemset (new_values, 0, numKeys * sizeof (int));
	cudaMemcpy (new_keys, keys, numKeys * sizeof (int), cudaMemcpyHostToDevice);
	cudaMemcpy (new_values, values, numKeys * sizeof (int), cudaMemcpyHostToDevice);
	// If the new keys would push the hashmap above 95% occupancy, reshape so that the
	// load factor ends up around 81% after the new keys are added
if ((total_size - free_size + numKeys) > ((float)((float)(95.00f / 100.00f) * (float)total_size))) {
reshape ((total_size - free_size + numKeys) * 100 / 81);
}
	// Compute how many blocks will run
blocks_number = numKeys / THREADS_NUMBER + 1;
insert_keys <<<blocks_number, THREADS_NUMBER>>> (new_keys, new_values, numKeys, bucket_1, bucket_2, (total_size / 2));
	// Wait for all blocks to finish
cudaDeviceSynchronize();
free_size -= numKeys;
	// Free the key-value pairs from device memory
cudaFree (new_keys);
cudaFree (new_values);
return true;
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
int rc;
int blocks_number;
	// Allocate memory for the keys and values in VRAM
int *new_keys;
int *new_values;
int *results;
new_keys = 0;
new_values = 0;
	rc = cudaMalloc (&new_keys, numKeys * sizeof (int));
	DIE (rc != cudaSuccess, "Error in getBatch while allocating new_keys!");
	cudaMemset (new_keys, 0, numKeys * sizeof (int));
	rc = cudaMalloc (&new_values, numKeys * sizeof (int));
	DIE (rc != cudaSuccess, "Error in getBatch while allocating new_values!");
	cudaMemset (new_values, 0, numKeys * sizeof (int));
	// Copy the keys into VRAM
	cudaMemcpy (new_keys, keys, numKeys * sizeof (int), cudaMemcpyHostToDevice);
	// Compute how many blocks will run
blocks_number = numKeys / THREADS_NUMBER + 1;
get_keys <<<blocks_number, THREADS_NUMBER>>> (new_keys, new_values, numKeys, bucket_1, bucket_2, (total_size / 2));
	// Wait for all blocks to finish
cudaDeviceSynchronize();
	// Copy the results back into host memory
results = (int*) malloc (numKeys * sizeof (int));
cudaMemcpy (results, new_values, numKeys * sizeof (int), cudaMemcpyDeviceToHost);
	// Free the key-value pairs from device memory
cudaFree (new_keys);
cudaFree (new_values);
return results;
}
/* GET LOAD FACTOR
 * number of stored elements / total number of hash table slots
*/
float GpuHashTable::loadFactor() {
if (total_size == 0) {
return 0.19f;
}
return (float)((float)((float)total_size - (float)free_size) / (float)total_size);
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#include "test_map.cpp"
|
49f580695900ad62da38fc6110f0f070a5dcdd87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_gcn.cuh"
#include "timer.h"
#include <algorithm>
#include <thrust/transform.h>
using std::max;
using std::max_element;
CUDAGCN::CUDAGCN(GCNParams params, GCNData *input_data) {
cuda_init_random_state(MAX_THREAD_PER_BLOCK);
this->params = params;
data = input_data;
sp = new CUDASparseIndex(data->feature_index);
graph = new CUDASparseIndex(data->graph);
modules.reserve(8);
variables.reserve(8);
// dropout
variables.emplace_back(data->feature_index.indices.size(), false);
input = &variables.back();
modules.push_back(new CUDADropout(input, params.dropout));
// sparse matmul
variables.emplace_back(params.num_nodes * params.hidden_dim);
CUDAVariable *layer1_var1 = &variables.back();
variables.emplace_back(params.input_dim * params.hidden_dim, true);
CUDAVariable *layer1_weight = &variables.back();
layer1_weight->glorot(params.input_dim, params.hidden_dim);
modules.push_back(new CUDASparseMatmul(input, layer1_weight, layer1_var1, sp, params.num_nodes, params.input_dim, params.hidden_dim));
// graph sum
variables.emplace_back(params.num_nodes * params.hidden_dim);
CUDAVariable *layer1_var2 = &variables.back();
modules.push_back(new CUDAGraphSum(layer1_var1, layer1_var2, graph, params.hidden_dim));
// ReLU
modules.push_back(new CUDAReLU(layer1_var2));
// dropout
modules.push_back(new CUDADropout(layer1_var2, params.dropout));
// dense matmul
variables.emplace_back(params.num_nodes * params.output_dim);
CUDAVariable *layer2_var1 = &variables.back();
variables.emplace_back(params.hidden_dim * params.output_dim, true);
CUDAVariable *layer2_weight = &variables.back();
layer2_weight->glorot(params.hidden_dim, params.output_dim);
modules.push_back(new CUDAMatmul(layer1_var2, layer2_weight, layer2_var1, params.num_nodes, params.hidden_dim, params.output_dim));
// graph sum
variables.emplace_back(params.num_nodes * params.output_dim);
output = &variables.back();
modules.push_back(new CUDAGraphSum(layer2_var1, output, graph, params.output_dim));
// cross entropy loss
CUDA_CHECK(hipMalloc((void**) &truth, params.num_nodes * sizeof(int)));
modules.push_back(new CUDACrossEntropyLoss(output, truth, &loss, params.output_dim));
// optimizer
AdamParams adam_params = AdamParams::get_default();
adam_params.lr = params.learning_rate;
adam_params.weight_decay = params.weight_decay;
optimizer = new CUDAAdam({{layer1_weight, true}, {layer2_weight, false}}, adam_params);
// other variable
CUDA_CHECK(hipMalloc((void**) &d_l2_penalty, variables[2].size * sizeof(float)));
}
CUDAGCN::~CUDAGCN() {
cuda_free_random_state();
for (auto &m : modules) delete m;
delete sp;
delete graph;
delete optimizer;
CUDA_CHECK(hipFree(truth));
CUDA_CHECK(hipFree(d_l2_penalty));
}
void CUDAGCN::set_input() {
CUDA_CHECK(hipMemcpy(input->data, data->feature_value.data(), input->size * sizeof(float), hipMemcpyHostToDevice));
}
void CUDAGCN::set_truth(int current_split) {
int *d_data_split, *d_data_label;
CUDA_CHECK(hipMalloc((void**) &d_data_split, params.num_nodes * sizeof(int)));
CUDA_CHECK(hipMalloc((void**) &d_data_label, params.num_nodes * sizeof(int)));
CUDA_CHECK(hipMemcpy(d_data_split, data->split.data(), params.num_nodes * sizeof(int), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_data_label, data->label.data(), params.num_nodes * sizeof(int), hipMemcpyHostToDevice));
dim3 block((params.num_nodes-1)/MAX_THREAD_PER_BLOCK + 1, 1, 1);
dim3 thread_in_block(MAX_THREAD_PER_BLOCK, 1, 1);
hipLaunchKernelGGL(( cuda_set_truth_kernel), dim3(block), dim3(thread_in_block), 0, 0, truth, d_data_split, d_data_label, current_split, params.num_nodes);
CUDA_CHECK(hipFree(d_data_split));
CUDA_CHECK(hipFree(d_data_label));
}
// TODO: reduction (using thrust?)
float CUDAGCN::get_accuracy() {
int *cpu_truth = new int[params.num_nodes];
float *cpu_output = new float[output->size];
CUDA_CHECK(hipMemcpy(cpu_truth, truth, params.num_nodes * sizeof(int), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(cpu_output, output->data, output->size * sizeof(float), hipMemcpyDeviceToHost));
int wrong = 0, total = 0;
for(int i = 0; i < params.num_nodes; i++) {
if(cpu_truth[i] < 0) continue;
total++;
float truth_logit = cpu_output[i * params.output_dim + cpu_truth[i]];
for(int j = 0; j < params.output_dim; j++)
if (cpu_output[i * params.output_dim + j] > truth_logit) {
wrong++;
break;
}
}
delete[] cpu_truth;
delete[] cpu_output;
return float(total - wrong) / total;
}
struct square_functor{
square_functor() {}
__host__ __device__ float operator()(const float &x) const {
return x * x;
}
};
float CUDAGCN::get_l2_penalty() {
int size = variables[2].size;
thrust::device_ptr<float> l2_ptr(d_l2_penalty), var2_ptr(variables[2].data);
thrust::transform(var2_ptr, var2_ptr + size, l2_ptr, square_functor());
float l2 = thrust::reduce(l2_ptr, l2_ptr + size, (float)0.0, thrust::plus<float>());
return params.weight_decay * l2 / 2;
}
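// A minimal sketch of what get_l2_penalty() computes, where w are the layer-1
// weights stored in variables[2]:
//   penalty = weight_decay * sum_i(w_i * w_i) / 2
// e.g. (hypothetical numbers) weight_decay = 5e-4 and weights {0.1, -0.2, 0.3}
// give 5e-4 * 0.14 / 2 = 3.5e-5.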
pair<float, float> CUDAGCN::train_epoch() {
set_input();
set_truth(1);
for (auto m: modules)
m->forward(true);
float train_loss = loss + get_l2_penalty();
float train_acc = get_accuracy();
for (int i = modules.size() - 1; i >= 0; i--)
modules[i]->backward();
optimizer->step();
return {train_loss, train_acc};
}
pair<float, float> CUDAGCN::eval(int current_split) {
set_input();
set_truth(current_split);
for (auto m: modules)
m->forward(false);
float test_loss = loss + get_l2_penalty();
float test_acc = get_accuracy();
return {test_loss, test_acc};
}
void CUDAGCN::run() {
int epoch = 1;
std::vector<float> loss_history;
for(; epoch <= params.epochs; epoch++) {
float train_loss, train_acc, val_loss, val_acc;
timer_start(TMR_TRAIN);
std::tie(train_loss, train_acc) = train_epoch();
std::tie(val_loss, val_acc) = eval(2);
printf("epoch=%d train_loss=%.5f train_acc=%.5f val_loss=%.5f val_acc=%.5f time=%.5f\n",
epoch, train_loss, train_acc, val_loss, val_acc, timer_stop(TMR_TRAIN));
loss_history.push_back(val_loss);
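        // Early-stopping sketch: with a hypothetical params.early_stopping = 10, training
        // stops at the first epoch whose validation loss exceeds the mean of the previous
        // 10 validation losses recorded in loss_history.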
if(params.early_stopping > 0 && epoch >= params.early_stopping) {
float recent_loss = 0.0;
for(int i = epoch - params.early_stopping; i < epoch; i++)
recent_loss += loss_history[i];
if (val_loss > recent_loss / params.early_stopping) {
printf("Early stopping...\n");
break;
}
}
}
printf("total training time=%.5f\n", timer_total(TMR_TRAIN));
float test_loss, test_acc;
timer_start(TMR_TEST);
std::tie(test_loss, test_acc) = eval(3);
printf("test_loss=%.5f test_acc=%.5f time=%.5f\n", test_loss, test_acc, timer_stop(TMR_TEST));
}
| 49f580695900ad62da38fc6110f0f070a5dcdd87.cu | #include "cuda_gcn.cuh"
#include "timer.h"
#include <algorithm>
#include <thrust/transform.h>
using std::max;
using std::max_element;
CUDAGCN::CUDAGCN(GCNParams params, GCNData *input_data) {
cuda_init_random_state(MAX_THREAD_PER_BLOCK);
this->params = params;
data = input_data;
sp = new CUDASparseIndex(data->feature_index);
graph = new CUDASparseIndex(data->graph);
modules.reserve(8);
variables.reserve(8);
// dropout
variables.emplace_back(data->feature_index.indices.size(), false);
input = &variables.back();
modules.push_back(new CUDADropout(input, params.dropout));
// sparse matmul
variables.emplace_back(params.num_nodes * params.hidden_dim);
CUDAVariable *layer1_var1 = &variables.back();
variables.emplace_back(params.input_dim * params.hidden_dim, true);
CUDAVariable *layer1_weight = &variables.back();
layer1_weight->glorot(params.input_dim, params.hidden_dim);
modules.push_back(new CUDASparseMatmul(input, layer1_weight, layer1_var1, sp, params.num_nodes, params.input_dim, params.hidden_dim));
// graph sum
variables.emplace_back(params.num_nodes * params.hidden_dim);
CUDAVariable *layer1_var2 = &variables.back();
modules.push_back(new CUDAGraphSum(layer1_var1, layer1_var2, graph, params.hidden_dim));
// ReLU
modules.push_back(new CUDAReLU(layer1_var2));
// dropout
modules.push_back(new CUDADropout(layer1_var2, params.dropout));
// dense matmul
variables.emplace_back(params.num_nodes * params.output_dim);
CUDAVariable *layer2_var1 = &variables.back();
variables.emplace_back(params.hidden_dim * params.output_dim, true);
CUDAVariable *layer2_weight = &variables.back();
layer2_weight->glorot(params.hidden_dim, params.output_dim);
modules.push_back(new CUDAMatmul(layer1_var2, layer2_weight, layer2_var1, params.num_nodes, params.hidden_dim, params.output_dim));
// graph sum
variables.emplace_back(params.num_nodes * params.output_dim);
output = &variables.back();
modules.push_back(new CUDAGraphSum(layer2_var1, output, graph, params.output_dim));
// cross entropy loss
CUDA_CHECK(cudaMalloc((void**) &truth, params.num_nodes * sizeof(int)));
modules.push_back(new CUDACrossEntropyLoss(output, truth, &loss, params.output_dim));
// optimizer
AdamParams adam_params = AdamParams::get_default();
adam_params.lr = params.learning_rate;
adam_params.weight_decay = params.weight_decay;
optimizer = new CUDAAdam({{layer1_weight, true}, {layer2_weight, false}}, adam_params);
// other variable
CUDA_CHECK(cudaMalloc((void**) &d_l2_penalty, variables[2].size * sizeof(float)));
}
CUDAGCN::~CUDAGCN() {
cuda_free_random_state();
for (auto &m : modules) delete m;
delete sp;
delete graph;
delete optimizer;
CUDA_CHECK(cudaFree(truth));
CUDA_CHECK(cudaFree(d_l2_penalty));
}
void CUDAGCN::set_input() {
CUDA_CHECK(cudaMemcpy(input->data, data->feature_value.data(), input->size * sizeof(float), cudaMemcpyHostToDevice));
}
void CUDAGCN::set_truth(int current_split) {
int *d_data_split, *d_data_label;
CUDA_CHECK(cudaMalloc((void**) &d_data_split, params.num_nodes * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**) &d_data_label, params.num_nodes * sizeof(int)));
CUDA_CHECK(cudaMemcpy(d_data_split, data->split.data(), params.num_nodes * sizeof(int), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_data_label, data->label.data(), params.num_nodes * sizeof(int), cudaMemcpyHostToDevice));
dim3 block((params.num_nodes-1)/MAX_THREAD_PER_BLOCK + 1, 1, 1);
dim3 thread_in_block(MAX_THREAD_PER_BLOCK, 1, 1);
cuda_set_truth_kernel<<<block, thread_in_block>>>(truth, d_data_split, d_data_label, current_split, params.num_nodes);
CUDA_CHECK(cudaFree(d_data_split));
CUDA_CHECK(cudaFree(d_data_label));
}
// TODO: reduction (using thrust?)
float CUDAGCN::get_accuracy() {
int *cpu_truth = new int[params.num_nodes];
float *cpu_output = new float[output->size];
CUDA_CHECK(cudaMemcpy(cpu_truth, truth, params.num_nodes * sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(cpu_output, output->data, output->size * sizeof(float), cudaMemcpyDeviceToHost));
int wrong = 0, total = 0;
for(int i = 0; i < params.num_nodes; i++) {
if(cpu_truth[i] < 0) continue;
total++;
float truth_logit = cpu_output[i * params.output_dim + cpu_truth[i]];
for(int j = 0; j < params.output_dim; j++)
if (cpu_output[i * params.output_dim + j] > truth_logit) {
wrong++;
break;
}
}
delete[] cpu_truth;
delete[] cpu_output;
return float(total - wrong) / total;
}
struct square_functor{
square_functor() {}
__host__ __device__ float operator()(const float &x) const {
return x * x;
}
};
float CUDAGCN::get_l2_penalty() {
int size = variables[2].size;
thrust::device_ptr<float> l2_ptr(d_l2_penalty), var2_ptr(variables[2].data);
thrust::transform(var2_ptr, var2_ptr + size, l2_ptr, square_functor());
float l2 = thrust::reduce(l2_ptr, l2_ptr + size, (float)0.0, thrust::plus<float>());
return params.weight_decay * l2 / 2;
}
pair<float, float> CUDAGCN::train_epoch() {
set_input();
set_truth(1);
for (auto m: modules)
m->forward(true);
float train_loss = loss + get_l2_penalty();
float train_acc = get_accuracy();
for (int i = modules.size() - 1; i >= 0; i--)
modules[i]->backward();
optimizer->step();
return {train_loss, train_acc};
}
pair<float, float> CUDAGCN::eval(int current_split) {
set_input();
set_truth(current_split);
for (auto m: modules)
m->forward(false);
float test_loss = loss + get_l2_penalty();
float test_acc = get_accuracy();
return {test_loss, test_acc};
}
void CUDAGCN::run() {
int epoch = 1;
std::vector<float> loss_history;
for(; epoch <= params.epochs; epoch++) {
float train_loss, train_acc, val_loss, val_acc;
timer_start(TMR_TRAIN);
std::tie(train_loss, train_acc) = train_epoch();
std::tie(val_loss, val_acc) = eval(2);
printf("epoch=%d train_loss=%.5f train_acc=%.5f val_loss=%.5f val_acc=%.5f time=%.5f\n",
epoch, train_loss, train_acc, val_loss, val_acc, timer_stop(TMR_TRAIN));
loss_history.push_back(val_loss);
if(params.early_stopping > 0 && epoch >= params.early_stopping) {
float recent_loss = 0.0;
for(int i = epoch - params.early_stopping; i < epoch; i++)
recent_loss += loss_history[i];
if (val_loss > recent_loss / params.early_stopping) {
printf("Early stopping...\n");
break;
}
}
}
printf("total training time=%.5f\n", timer_total(TMR_TRAIN));
float test_loss, test_acc;
timer_start(TMR_TEST);
std::tie(test_loss, test_acc) = eval(3);
printf("test_loss=%.5f test_acc=%.5f time=%.5f\n", test_loss, test_acc, timer_stop(TMR_TEST));
}
|
07d520b56d63190daf54539d2f6d0ca682339052.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vecAddGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
double n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
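// Example using values from the tables above: XSIZE = 240 with BLOCKX = 32 rounds
// iXSIZE up to 256, so gridBlock.x = 256 / 32 = 8 and the matrix is covered by
// whole blocks only.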
hipFree(0);
hipLaunchKernelGGL(vecAddGPU, dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vecAddGPU, dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vecAddGPU, dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 07d520b56d63190daf54539d2f6d0ca682339052.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vecAddGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
double n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vecAddGPU<<<gridBlock,threadBlock>>>(a,b,c,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vecAddGPU<<<gridBlock,threadBlock>>>(a,b,c,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vecAddGPU<<<gridBlock,threadBlock>>>(a,b,c,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cc1503f49ade276b8431006d9260ef7f544970ac.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <opencv/cvaux.h>
#include <opencv2/opencv.hpp>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <opencv/cv.h>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <string>
#include <string.h>
#include <iostream>
#include <hip/hip_runtime.h>
struct pixel{
int R,G,B;
};
__global__ void blur(const pixel *src_img, pixel *dst_img,int Ksize, int h, int w, int numElements, int threads){
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
if (index<w-Ksize)
{
int n;
int l;
int i;
int j;
int sumR=0;
int sumG=0;
int sumB=0;
		int klimitr; // convolution bound along rows
		int klimitc; // convolution bound along columns
int aux=((h-(2*(Ksize/2)))/threads);
int start= index*aux;
int end=start+aux;
int Km=Ksize/2;
if (threads>(h-(2*Km)))
{
aux=1;
}
//i=index+(Ksize/2);
for (i = start+Ksize/2; i < end+Ksize/2; ++i)
{
/* code */
for (j = Ksize/2; j < w-Ksize/2; ++j)
{
sumB=0;
sumG=0;
sumR=0;
if (Ksize%2==0)
{
klimitr=i+(Ksize/2)-1;
klimitc=j+(Ksize/2)-1;
}else{
					klimitr=i+(Ksize/2); // convolution bound along rows
					klimitc=j+(Ksize/2); // convolution bound along columns
}
				for (l = i-(Ksize/2); l <= klimitr; ++l) // convolution over rows
				{
					for (n = j-(Ksize/2); n <= klimitc; ++n) // convolution over columns
{
sumR+=src_img[l*w+n].R;
sumG+=src_img[l*w+n].G;
						sumB+=src_img[l*w+n].B; // accumulate the neighbourhood sum for each channel
}
}
sumR-=src_img[i*w+j].R;
sumG-=src_img[i*w+j].G;
				sumB-=src_img[i*w+j].B; // subtract the centre pixel so only the neighbours are averaged
dst_img[i*w+j].R=sumR/((Ksize*Ksize)-1);
dst_img[i*w+j].G=sumG/((Ksize*Ksize)-1);
dst_img[i*w+j].B=sumB/((Ksize*Ksize)-1);
}
}
}
}
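/* Worked example of the averaging above: with Ksize = 3 the loops sum the 3 x 3
 * window, the centre pixel is subtracted, and the result is divided by
 * 3 * 3 - 1 = 8, so each output channel is the mean of the 8 surrounding pixels. */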
using namespace cv;
int main(int argc, char const *argv[])
{
Mat mat = imread(argv[1], CV_LOAD_IMAGE_COLOR);
int Ksize=atoi(argv[2]);
int h=mat.rows;
int w=mat.cols;
/* if (!img)
{
printf("Image: can NOT Load!!!\n");
return 1;
}*/
copyMakeBorder( mat, mat, Ksize/2, Ksize/2, Ksize/2, Ksize/2, BORDER_REPLICATE);
int tmpH=mat.rows;
int tmpW=mat.cols;
hipError_t err = hipSuccess;
int numElements = tmpH*tmpW;
int size = numElements*sizeof(pixel);
pixel * h_img = (pixel*)malloc(size);
if (h_img == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < tmpH; ++i)
{
for (int j = 0; j < tmpW; ++j)
{
h_img[(tmpW*i)+j].B=(mat.at<cv::Vec3b>(i,j)[0]);
h_img[(tmpW*i)+j].G=(mat.at<cv::Vec3b>(i,j)[1]);
h_img[(tmpW*i)+j].R=(mat.at<cv::Vec3b>(i,j)[2]);
}
}
pixel * d_img;
err = hipMalloc((void**)&d_img, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
pixel * d_imgDst;
err = hipMalloc((void**)&d_imgDst, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector D_imgDst (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_img, h_img, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector h_R from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int threadsPerBlock = atoi(argv[3]);
int blocks=atoi(argv[4]);
if (blocks==0){
blocks= (h/threadsPerBlock)+1;
}
int threads=blocks*threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threadsPerBlock);
hipLaunchKernelGGL(( blur), dim3(blocks),dim3(threadsPerBlock), 0, 0, d_img, d_imgDst, Ksize, tmpH, tmpW, numElements, threads);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch blur kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_img, d_imgDst, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_dstR from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_img);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_img (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_imgDst);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_imgDst (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
	/*
		Copy the result vectors produced by the blur kernel back into the Mat
	*/
for (int i = 0; i < tmpH; ++i)
{
for (int j = 0; j < tmpW; ++j)
{
(mat.at<cv::Vec3b>(i,j)[0])=h_img[(tmpW*i)+j].B;
(mat.at<cv::Vec3b>(i,j)[1])=h_img[(tmpW*i)+j].G;
(mat.at<cv::Vec3b>(i,j)[2])=h_img[(tmpW*i)+j].R;
}
}
Mat cropedImage = mat(Rect(Ksize/2,Ksize/2,w,h));
	std::string filename=argv[1];
	// keep only the base name (drop the extension) so the suffixes below form the output names
	filename=filename.substr(0,filename.find_last_of('.'));
cv::imwrite( filename+" --NOCROPPED-- .jpg", mat);
cv::imwrite( filename+" --blurred-- .jpg", cropedImage);
free(h_img);
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
} | cc1503f49ade276b8431006d9260ef7f544970ac.cu | #include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <opencv/cvaux.h>
#include <opencv2/opencv.hpp>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <opencv/cv.h>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <string>
#include <string.h>
#include <iostream>
#include <cuda_runtime.h>
struct pixel{
int R,G,B;
};
__global__ void blur(const pixel *src_img, pixel *dst_img,int Ksize, int h, int w, int numElements, int threads){
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
if (index<w-Ksize)
{
int n;
int l;
int i;
int j;
int sumR=0;
int sumG=0;
int sumB=0;
		int klimitr; // convolution bound along rows
		int klimitc; // convolution bound along columns
int aux=((h-(2*(Ksize/2)))/threads);
int start= index*aux;
int end=start+aux;
int Km=Ksize/2;
if (threads>(h-(2*Km)))
{
aux=1;
}
//i=index+(Ksize/2);
for (i = start+Ksize/2; i < end+Ksize/2; ++i)
{
/* code */
for (j = Ksize/2; j < w-Ksize/2; ++j)
{
sumB=0;
sumG=0;
sumR=0;
if (Ksize%2==0)
{
klimitr=i+(Ksize/2)-1;
klimitc=j+(Ksize/2)-1;
}else{
					klimitr=i+(Ksize/2); // convolution bound along rows
					klimitc=j+(Ksize/2); // convolution bound along columns
}
				for (l = i-(Ksize/2); l <= klimitr; ++l) // convolution over rows
				{
					for (n = j-(Ksize/2); n <= klimitc; ++n) // convolution over columns
{
sumR+=src_img[l*w+n].R;
sumG+=src_img[l*w+n].G;
						sumB+=src_img[l*w+n].B; // accumulate the neighbourhood sum for each channel
}
}
sumR-=src_img[i*w+j].R;
sumG-=src_img[i*w+j].G;
				sumB-=src_img[i*w+j].B; // subtract the centre pixel so only the neighbours are averaged
dst_img[i*w+j].R=sumR/((Ksize*Ksize)-1);
dst_img[i*w+j].G=sumG/((Ksize*Ksize)-1);
dst_img[i*w+j].B=sumB/((Ksize*Ksize)-1);
}
}
}
}
using namespace cv;
int main(int argc, char const *argv[])
{
Mat mat = imread(argv[1], CV_LOAD_IMAGE_COLOR);
int Ksize=atoi(argv[2]);
int h=mat.rows;
int w=mat.cols;
/* if (!img)
{
printf("Image: can NOT Load!!!\n");
return 1;
}*/
copyMakeBorder( mat, mat, Ksize/2, Ksize/2, Ksize/2, Ksize/2, BORDER_REPLICATE);
int tmpH=mat.rows;
int tmpW=mat.cols;
cudaError_t err = cudaSuccess;
int numElements = tmpH*tmpW;
int size = numElements*sizeof(pixel);
pixel * h_img = (pixel*)malloc(size);
if (h_img == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < tmpH; ++i)
{
for (int j = 0; j < tmpW; ++j)
{
h_img[(tmpW*i)+j].B=(mat.at<cv::Vec3b>(i,j)[0]);
h_img[(tmpW*i)+j].G=(mat.at<cv::Vec3b>(i,j)[1]);
h_img[(tmpW*i)+j].R=(mat.at<cv::Vec3b>(i,j)[2]);
}
}
pixel * d_img;
err = cudaMalloc((void**)&d_img, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
pixel * d_imgDst;
err = cudaMalloc((void**)&d_imgDst, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector D_imgDst (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_img, h_img, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector h_R from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int threadsPerBlock = atoi(argv[3]);
int blocks=atoi(argv[4]);
if (blocks==0){
blocks= (h/threadsPerBlock)+1;
}
int threads=blocks*threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threadsPerBlock);
blur<<<blocks,threadsPerBlock>>>(d_img, d_imgDst, Ksize, tmpH, tmpW, numElements, threads);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch blur kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_img, d_imgDst, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_dstR from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_img);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_img (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_imgDst);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_imgDst (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
	/*
		Copy the result vectors produced by the blur kernel back into the Mat
	*/
for (int i = 0; i < tmpH; ++i)
{
for (int j = 0; j < tmpW; ++j)
{
(mat.at<cv::Vec3b>(i,j)[0])=h_img[(tmpW*i)+j].B;
(mat.at<cv::Vec3b>(i,j)[1])=h_img[(tmpW*i)+j].G;
(mat.at<cv::Vec3b>(i,j)[2])=h_img[(tmpW*i)+j].R;
}
}
Mat cropedImage = mat(Rect(Ksize/2,Ksize/2,w,h));
	std::string filename=argv[1];
	// keep only the base name (drop the extension) so the suffixes below form the output names
	filename=filename.substr(0,filename.find_last_of('.'));
cv::imwrite( filename+" --NOCROPPED-- .jpg", mat);
cv::imwrite( filename+" --blurred-- .jpg", cropedImage);
free(h_img);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
} |
7c86b1d17affa49de0769f9d9faabdd726ec6fa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rolling/jit/code/code.h>
#include <rolling/rolling_detail.hpp>
#include <rolling/rolling_jit_detail.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby/sort_helper.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/rolling.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <jit/launcher.h>
#include <jit/parser.h>
#include <jit/type.h>
#include <jit/bit.hpp.jit>
#include <jit/rolling_jit_detail.hpp.jit>
#include <jit/types.hpp.jit>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <thrust/binary_search.h>
#include <thrust/detail/execution_policy.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <memory>
namespace cudf {
namespace detail {
namespace { // anonymous
/**
* @brief Only COUNT_VALID operation is executed and count is updated
* depending on `min_periods` and returns true if it was
* valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<op == aggregation::COUNT_VALID>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
bool output_is_valid = ((end_index - start_index) >= min_periods);
if (output_is_valid) {
if (!has_nulls) {
count = end_index - start_index;
} else {
count = thrust::count_if(thrust::seq,
thrust::make_counting_iterator(start_index),
thrust::make_counting_iterator(end_index),
[&input](auto i) { return input.is_valid_nocheck(i); });
}
output.element<OutputType>(current_index) = count;
}
return output_is_valid;
}
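/*
 * Illustrative example: for a window spanning rows [2, 7) (five rows, two of them
 * null) with min_periods = 3, the window size 5 >= 3 marks the output row valid and
 * the stored COUNT_VALID result is 3; with min_periods = 6 the row would be null.
 */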
/**
* @brief Only COUNT_ALL operation is executed and count is updated
* depending on `min_periods` and returns true if it was
* valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<op == aggregation::COUNT_ALL>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
cudf::size_type count = end_index - start_index;
bool output_is_valid = (count >= min_periods);
output.element<OutputType>(current_index) = count;
return output_is_valid;
}
/**
* @brief Calculates row-number within [start_index, end_index).
* Count is updated depending on `min_periods`
* Returns true if it was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<op == aggregation::ROW_NUMBER>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
bool output_is_valid = ((end_index - start_index) >= min_periods);
output.element<OutputType>(current_index) = ((current_index - start_index) + 1);
return output_is_valid;
}
/**
* @brief LEAD(N): Returns the row from the input column, at the specified offset past the
* current row.
* If the offset crosses the grouping boundary or column boundary for
* a given row, a "default" value is returned. The "default" value is null, by default.
*
* E.g. Consider an input column with the following values and grouping:
* [10, 11, 12, 13, 20, 21, 22, 23]
* <------G1-----> <------G2------>
*
* LEAD(input_col, 1) yields:
* [11, 12, 13, null, 21, 22, 23, null]
*
* LEAD(input_col, 1, 99) (where 99 indicates the default) yields:
* [11, 12, 13, 99, 21, 22, 23, 99]
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls>
std::enable_if_t<(op == aggregation::LEAD) && (cudf::is_fixed_width<InputType>()), bool> __device__
process_rolling_window(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods,
agg_op device_agg_op)
{
// Offsets have already been normalized.
auto row_offset = device_agg_op.row_offset;
// Check if row is invalid.
if (row_offset > (end_index - current_index - 1)) {
// Invalid row marked. Use default value, if available.
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; }
output.element<OutputType>(current_index) = default_outputs.element<OutputType>(current_index);
return true;
}
// Not an invalid row.
auto index = current_index + row_offset;
auto is_null = input.is_null(index);
if (!is_null) { output.element<OutputType>(current_index) = input.element<InputType>(index); }
return !is_null;
}
/**
* @brief LAG(N): returns the row from the input column at the specified offset preceding
* the current row.
* If the offset crosses the grouping boundary or column boundary for
* a given row, a "default" value is returned. The "default" value is null, by default.
*
* E.g. Consider an input column with the following values and grouping:
* [10, 11, 12, 13, 20, 21, 22, 23]
* <------G1-----> <------G2------>
*
* LAG(input_col, 2) yields:
* [null, null, 10, 11, null, null, 20, 21]
* LAG(input_col, 2, 99) yields:
* [99, 99, 10, 11, 99, 99, 20, 21]
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls>
std::enable_if_t<(op == aggregation::LAG) && (cudf::is_fixed_width<InputType>()), bool> __device__
process_rolling_window(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods,
agg_op device_agg_op)
{
// Offsets have already been normalized.
auto row_offset = device_agg_op.row_offset;
// Check if row is invalid.
if (row_offset > (current_index - start_index)) {
// Invalid row marked. Use default value, if available.
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; }
output.element<OutputType>(current_index) = default_outputs.element<OutputType>(current_index);
return true;
}
// Not an invalid row.
auto index = current_index - row_offset;
auto is_null = input.is_null(index);
if (!is_null) { output.element<OutputType>(current_index) = input.element<InputType>(index); }
return !is_null;
}
/**
* @brief Only used for `string_view` type to get ARGMIN and ARGMAX, which
* will be used to gather MIN and MAX. And returns true if the
* operation was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<(op == aggregation::ARGMIN or op == aggregation::ARGMAX) and
std::is_same<InputType, cudf::string_view>::value>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
InputType val = agg_op::template identity<InputType>();
OutputType val_index = (op == aggregation::ARGMIN) ? ARGMIN_SENTINEL : ARGMAX_SENTINEL;
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
InputType element = input.element<InputType>(j);
val = agg_op{}(element, val);
if (val == element) { val_index = j; }
count++;
}
}
bool output_is_valid = (count >= min_periods);
// -1 will help identify null elements while gathering for Min and Max
// In case of count, this would be null, so doesn't matter.
output.element<OutputType>(current_index) = (output_is_valid) ? val_index : -1;
  // The gather mask shouldn't contain null values, so
  // the result is always reported as valid
  return true;
}
/**
* @brief Operates on only fixed-width types and returns true if the
* operation was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<!std::is_same<InputType, cudf::string_view>::value and
!(op == aggregation::COUNT_VALID || op == aggregation::COUNT_ALL ||
op == aggregation::ROW_NUMBER || op == aggregation::LEAD ||
op == aggregation::LAG)>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
OutputType val = agg_op::template identity<OutputType>();
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
OutputType element = input.element<InputType>(j);
val = agg_op{}(element, val);
count++;
}
}
bool output_is_valid = (count >= min_periods);
// store the output value, one per thread
cudf::detail::rolling_store_output_functor<OutputType, op == aggregation::MEAN>{}(
output.element<OutputType>(current_index), val, count);
return output_is_valid;
}
/**
* @brief Computes the rolling window function
*
* @tparam InputType Datatype of `input`
* @tparam OutputType Datatype of `output`
* @tparam agg_op A functor that defines the aggregation operation
* @tparam op The aggregation operator (enum value)
* @tparam block_size CUDA block size for the kernel
* @tparam has_nulls true if the input column has nulls
* @tparam PrecedingWindowIterator iterator type (inferred)
* @tparam FollowingWindowIterator iterator type (inferred)
* @param input Input column device view
* @param output Output column device view
* @param preceding_window_begin[in] Rolling window size iterator, accumulates from
* in_col[i-preceding_window] to in_col[i] inclusive
* @param following_window_begin[in] Rolling window size iterator in the forward
* direction, accumulates from in_col[i] to
* in_col[i+following_window] inclusive
* @param min_periods[in] Minimum number of observations in window required to
* have a value, otherwise 0 is stored in the valid bit mask
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
int block_size,
bool has_nulls,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
__launch_bounds__(block_size) __global__
void gpu_rolling(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type* __restrict__ output_valid_count,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods)
{
size_type i = blockIdx.x * block_size + threadIdx.x;
size_type stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, i < input.size());
while (i < input.size()) {
size_type preceding_window = preceding_window_begin[i];
size_type following_window = following_window_begin[i];
// compute bounds
size_type start = min(input.size(), max(0, i - preceding_window + 1));
size_type end = min(input.size(), max(0, i + following_window + 1));
size_type start_index = min(start, end);
size_type end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
volatile bool output_is_valid = false;
output_is_valid = process_rolling_window<InputType, OutputType, agg_op, op, has_nulls>(
input, default_outputs, output, start_index, end_index, i, min_periods);
// set the mask
cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)};
// only one thread writes the mask
if (0 == threadIdx.x % cudf::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
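/**
 * @brief Variant of gpu_rolling that additionally forwards a constructed device aggregation
 * operator (e.g. cudf::DeviceLeadLag, which carries the row offset) to process_rolling_window.
 * Used for aggregations such as LEAD/LAG whose operator holds per-call state.
 */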
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
int block_size,
bool has_nulls,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
__launch_bounds__(block_size) __global__
void gpu_rolling(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type* __restrict__ output_valid_count,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
agg_op device_agg_op)
{
size_type i = blockIdx.x * block_size + threadIdx.x;
size_type stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, i < input.size());
while (i < input.size()) {
size_type preceding_window = preceding_window_begin[i];
size_type following_window = following_window_begin[i];
// compute bounds
size_type start = min(input.size(), max(0, i - preceding_window + 1));
size_type end = min(input.size(), max(0, i + following_window + 1));
size_type start_index = min(start, end);
size_type end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
volatile bool output_is_valid = false;
output_is_valid = process_rolling_window<InputType, OutputType, agg_op, op, has_nulls>(
input, default_outputs, output, start_index, end_index, i, min_periods, device_agg_op);
// set the mask
cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)};
// only one thread writes the mask
if (0 == threadIdx.x % cudf::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
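/**
 * @brief Launches the rolling-window computation for a fixed input type.
 *
 * The operator() overloads are selected on the aggregation::Kind (via aggregation_dispatcher
 * in dispatch_rolling below) and forward to the appropriate launch()/kernel_launcher() path.
 */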
template <typename InputType>
struct rolling_window_launcher {
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
size_type kernel_launcher(column_view const& input,
column_view const& default_outputs,
mutable_column_view& output,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream)
{
constexpr cudf::size_type block_size = 256;
cudf::detail::grid_1d grid(input.size(), block_size);
auto input_device_view = column_device_view::create(input, stream);
auto output_device_view = mutable_column_device_view::create(output, stream);
auto default_outputs_device_view = column_device_view::create(default_outputs, stream);
rmm::device_scalar<size_type> device_valid_count{0, stream};
if (input.has_nulls()) {
hipLaunchKernelGGL(( gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, true>)
, dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), *input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods);
} else {
hipLaunchKernelGGL(( gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, false>)
, dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), *input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods);
}
size_type valid_count = device_valid_count.value(stream);
// check the stream for debugging
CHECK_CUDA(stream.value());
return valid_count;
}
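  // Overload of kernel_launcher that forwards a device aggregation operator; used by the
  // LEAD/LAG launch path below.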
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
size_type kernel_launcher(column_view const& input,
column_view const& default_outputs,
mutable_column_view& output,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
agg_op const& device_agg_op,
rmm::cuda_stream_view stream)
{
constexpr cudf::size_type block_size = 256;
cudf::detail::grid_1d grid(input.size(), block_size);
auto input_device_view = column_device_view::create(input, stream);
auto output_device_view = mutable_column_device_view::create(output, stream);
auto default_outputs_device_view = column_device_view::create(default_outputs, stream);
rmm::device_scalar<size_type> device_valid_count{0, stream};
if (input.has_nulls()) {
hipLaunchKernelGGL(( gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, true>)
, dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), *input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods,
device_agg_op);
} else {
hipLaunchKernelGGL(( gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, false>)
, dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), *input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods,
device_agg_op);
}
size_type valid_count = device_valid_count.value(stream);
// check the stream for debugging
CHECK_CUDA(stream.value());
return valid_count;
}
// This launch is only for fixed width columns with valid aggregation option
// numeric: All
// timestamp: MIN, MAX, COUNT_VALID, COUNT_ALL, ROW_NUMBER
// string, dictionary, list : COUNT_VALID, COUNT_ALL, ROW_NUMBER
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<cudf::detail::is_rolling_supported<T, agg_op, op>() and
!cudf::detail::is_rolling_string_specialization<T, agg_op, op>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto output = make_fixed_width_column(
target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr);
cudf::mutable_column_view output_view = output->mutable_view();
auto valid_count =
kernel_launcher<T, agg_op, op, PrecedingWindowIterator, FollowingWindowIterator>(
input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream);
output->set_null_count(output->size() - valid_count);
return output;
}
// This launch is only for string specializations
// string: MIN, MAX
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<cudf::detail::is_rolling_string_specialization<T, agg_op, op>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto output = make_numeric_column(cudf::data_type{cudf::type_to_id<size_type>()},
input.size(),
cudf::mask_state::UNINITIALIZED,
stream,
mr);
cudf::mutable_column_view output_view = output->mutable_view();
    // Pass the device operator and aggregation::Kind together as a fixed pair; otherwise the
    // compiler tries to instantiate agg_op with mismatched combinations and compilation fails.
if (op == aggregation::MIN) {
kernel_launcher<T,
DeviceMin,
aggregation::ARGMIN,
PrecedingWindowIterator,
FollowingWindowIterator>(input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream);
} else if (op == aggregation::MAX) {
kernel_launcher<T,
DeviceMax,
aggregation::ARGMAX,
PrecedingWindowIterator,
FollowingWindowIterator>(input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream);
} else {
CUDF_FAIL("MIN and MAX are the only supported aggregation types for string columns");
}
    // Rows that represent null elements have negative values in the gather map, which is why
    // out-of-bounds indices are nullified (out_of_bounds_policy::NULLIFY).
auto output_table = detail::gather(table_view{{input}},
output->view(),
cudf::out_of_bounds_policy::NULLIFY,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::make_unique<cudf::column>(std::move(output_table->get_column(0)));
}
// Deals with invalid column and/or aggregation options
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!cudf::detail::is_rolling_supported<T, agg_op, op>() and
!cudf::detail::is_rolling_string_specialization<T, agg_op, op>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Aggregation operator and/or input type combination is invalid");
}
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<cudf::is_fixed_width<T>() and
(op == aggregation::LEAD || op == aggregation::LAG),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
agg_op const& device_agg_op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS(default_outputs.type().id() == input.type().id(),
"Defaults column type must match input column."); // Because LEAD/LAG.
// For LEAD(0)/LAG(0), no computation need be performed.
// Return copy of input.
if (0 == static_cast<cudf::detail::lead_lag_aggregation*>(agg.get())->row_offset) {
return std::make_unique<column>(input, stream, mr);
}
auto output = make_fixed_width_column(
target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr);
cudf::mutable_column_view output_view = output->mutable_view();
auto valid_count =
kernel_launcher<T, agg_op, op, PrecedingWindowIterator, FollowingWindowIterator>(
input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
device_agg_op,
stream);
output->set_null_count(output->size() - valid_count);
return output;
}
// Deals with invalid column and/or aggregation options
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!(op == aggregation::LEAD || op == aggregation::LAG) ||
!cudf::is_fixed_width<T>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
agg_op device_agg_op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL(
"Aggregation operator and/or input type combination is invalid: "
"LEAD/LAG supported only on fixed-width types");
}
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!(op == aggregation::MEAN || op == aggregation::LEAD || op == aggregation::LAG),
std::unique_ptr<column>>
operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(default_outputs.is_empty(),
"Only LEAD/LAG window functions support default values.");
return launch<InputType,
typename corresponding_operator<op>::type,
op,
PrecedingWindowIterator,
FollowingWindowIterator>(input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
// This variant is just to handle mean
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<(op == aggregation::MEAN), std::unique_ptr<column>> operator()(
column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return launch<InputType, cudf::DeviceSum, op, PrecedingWindowIterator, FollowingWindowIterator>(
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<(op == aggregation::LEAD || op == aggregation::LAG), std::unique_ptr<column>>
operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return launch<InputType,
cudf::DeviceLeadLag,
op,
PrecedingWindowIterator,
FollowingWindowIterator>(
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
cudf::DeviceLeadLag{static_cast<cudf::detail::lead_lag_aggregation*>(agg.get())->row_offset},
stream,
mr);
}
};
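/**
 * @brief Type-dispatch functor for rolling windows: dispatched on the input column's data type
 * by cudf::type_dispatcher, it then dispatches on the aggregation kind via aggregation_dispatcher
 * to rolling_window_launcher<T>.
 */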
struct dispatch_rolling {
template <typename T, typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return aggregation_dispatcher(agg->kind,
rolling_window_launcher<T>{},
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
};
} // namespace
// Applies a user-defined rolling window function to the values in a column.
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> rolling_window_udf(column_view const& input,
PrecedingWindowIterator preceding_window,
std::string const& preceding_window_str,
FollowingWindowIterator following_window,
std::string const& following_window_str,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
if (input.has_nulls())
CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls.");
min_periods = ::max(min_periods, 0);
auto udf_agg = static_cast<udf_aggregation*>(agg.get());
std::string hash = "prog_rolling." + std::to_string(std::hash<std::string>{}(udf_agg->_source));
std::string cuda_source;
switch (udf_agg->kind) {
case aggregation::Kind::PTX:
cuda_source = cudf::rolling::jit::code::kernel_headers;
cuda_source +=
cudf::jit::parse_single_function_ptx(udf_agg->_source,
udf_agg->_function_name,
cudf::jit::get_type_name(udf_agg->_output_type),
{0, 5}); // args 0 and 5 are pointers.
cuda_source += cudf::rolling::jit::code::kernel;
break;
case aggregation::Kind::CUDA:
cuda_source = cudf::rolling::jit::code::kernel_headers;
cuda_source +=
cudf::jit::parse_single_function_cuda(udf_agg->_source, udf_agg->_function_name);
cuda_source += cudf::rolling::jit::code::kernel;
break;
default: CUDF_FAIL("Unsupported UDF type.");
}
std::unique_ptr<column> output = make_numeric_column(
udf_agg->_output_type, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
auto output_view = output->mutable_view();
rmm::device_scalar<size_type> device_valid_count{0, stream};
const std::vector<std::string> compiler_flags{"-std=c++14",
// Have jitify prune unused global variables
"-remove-unused-globals",
// suppress all NVRTC warnings
"-w"};
// Launch the jitify kernel
cudf::jit::launcher(hash,
cuda_source,
{cudf_types_hpp,
cudf_utilities_bit_hpp,
cudf::rolling::jit::code::operation_h,
___src_rolling_rolling_jit_detail_hpp},
compiler_flags,
nullptr,
stream)
.set_kernel_inst("gpu_rolling_new", // name of the kernel we are launching
{cudf::jit::get_type_name(input.type()), // list of template arguments
cudf::jit::get_type_name(output->type()),
udf_agg->_operator_name,
preceding_window_str.c_str(),
following_window_str.c_str()})
.launch(input.size(),
cudf::jit::get_data_ptr(input),
input.null_mask(),
cudf::jit::get_data_ptr(output_view),
output_view.null_mask(),
device_valid_count.data(),
preceding_window,
following_window,
min_periods);
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CHECK_CUDA(stream.value());
return output;
}
/**
* @copydoc cudf::rolling_window(column_view const& input,
* PrecedingWindowIterator preceding_window_begin,
* FollowingWindowIterator following_window_begin,
* size_type min_periods,
* std::unique_ptr<aggregation> const& agg,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
min_periods = ::max(min_periods, 0);
return cudf::type_dispatcher(input.type(),
dispatch_rolling{},
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
} // namespace detail
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
return rolling_window(
input, empty_like(input)->view(), preceding_window, following_window, min_periods, agg, mr);
}
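// Illustrative usage sketch (not part of this translation unit). Assumes the public
// cudf::make_sum_aggregation() factory and the current RMM device memory resource:
//
//   auto agg = cudf::make_sum_aggregation();
//   auto out = cudf::rolling_window(input_col,
//                                   2 /*preceding, includes current row*/,
//                                   1 /*following*/,
//                                   1 /*min_periods*/,
//                                   agg,
//                                   rmm::mr::get_current_device_resource());
//
// For each row i this computes sum(input_col[i-1 .. i+1]); the result is null wherever
// fewer than min_periods rows fall inside the window.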
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
"Defaults column must be either empty or have as many rows as the input column.");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::detail::rolling_window_udf(input,
preceding_window,
"cudf::size_type",
following_window,
"cudf::size_type",
min_periods,
agg,
rmm::cuda_stream_default,
mr);
} else {
auto preceding_window_begin = thrust::make_constant_iterator(preceding_window);
auto following_window_begin = thrust::make_constant_iterator(following_window);
return cudf::detail::rolling_window(input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
rmm::cuda_stream_default,
mr);
}
}
// Applies a variable-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (preceding_window.is_empty() || following_window.is_empty() || input.is_empty())
return empty_like(input);
CUDF_EXPECTS(preceding_window.type().id() == type_id::INT32 &&
following_window.type().id() == type_id::INT32,
"preceding_window/following_window must have type_id::INT32 type");
CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(),
"preceding_window/following_window size must match input size");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::detail::rolling_window_udf(input,
preceding_window.begin<size_type>(),
"cudf::size_type*",
following_window.begin<size_type>(),
"cudf::size_type*",
min_periods,
agg,
rmm::cuda_stream_default,
mr);
} else {
return cudf::detail::rolling_window(input,
empty_like(input)->view(),
preceding_window.begin<size_type>(),
following_window.begin<size_type>(),
min_periods,
agg,
rmm::cuda_stream_default,
mr);
}
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
window_bounds::get(preceding_window),
window_bounds::get(following_window),
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
window_bounds preceding_window,
window_bounds following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
empty_like(input)->view(),
preceding_window,
following_window,
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
default_outputs,
window_bounds::get(preceding_window),
window_bounds::get(following_window),
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
window_bounds preceding_window_bounds,
window_bounds following_window_bounds,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
"Size mismatch between group_keys and input vector.");
CUDF_EXPECTS((min_periods > 0), "min_periods must be positive");
CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
"Defaults column must be either empty or have as many rows as the input column.");
auto const preceding_window = preceding_window_bounds.value;
auto const following_window = following_window_bounds.value;
if (group_keys.num_columns() == 0) {
// No Groupby columns specified. Treat as one big group.
return rolling_window(
input, default_outputs, preceding_window, following_window, min_periods, aggr, mr);
}
using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper;
sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES};
auto group_offsets{helper.group_offsets()};
auto const& group_labels{helper.group_labels()};
// `group_offsets` are interpreted in adjacent pairs, each pair representing the offsets
// of the first, and one past the last elements in a group.
//
// If `group_offsets` is not empty, it must contain at least two offsets:
// a. 0, indicating the first element in `input`
// b. input.size(), indicating one past the last element in `input`.
//
// Thus, for an input of 1000 rows,
// 0. [] indicates a single group, spanning the entire column.
  // 1. [10] is invalid.
// 2. [0, 1000] indicates a single group, spanning the entire column (thus, equivalent to no
// groups.)
// 3. [0, 500, 1000] indicates two equal-sized groups: [0,500), and [500,1000).
assert(group_offsets.size() >= 2 && group_offsets[0] == 0 &&
group_offsets[group_offsets.size() - 1] == input.size() &&
"Must have at least one group.");
auto preceding_calculator = [d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
preceding_window] __device__(size_type idx) {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
return thrust::minimum<size_type>{}(preceding_window,
idx - group_start + 1); // Preceding includes current row.
};
auto following_calculator = [d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
following_window] __device__(size_type idx) {
auto group_label = d_group_labels[idx];
auto group_end =
d_group_offsets[group_label +
                      1];  // Cannot fall off the end, since offsets are capped with `input.size()`.
return thrust::minimum<size_type>{}(following_window, (group_end - 1) - idx);
};
if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) {
cudf::detail::preceding_window_wrapper grouped_preceding_window{
group_offsets.data().get(), group_labels.data().get(), preceding_window};
cudf::detail::following_window_wrapper grouped_following_window{
group_offsets.data().get(), group_labels.data().get(), following_window};
return cudf::detail::rolling_window_udf(input,
grouped_preceding_window,
"cudf::detail::preceding_window_wrapper",
grouped_following_window,
"cudf::detail::following_window_wrapper",
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
} else {
return cudf::detail::rolling_window(
input,
default_outputs,
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
}
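// Illustrative grouped usage sketch (group_col/input_col are placeholders; assumes a
// pre-sorted group-key table, cudf::make_max_aggregation(), and the current RMM device
// memory resource):
//
//   auto agg = cudf::make_max_aggregation();
//   auto out = cudf::grouped_rolling_window(cudf::table_view{{group_col}}, input_col,
//                                           3 /*preceding*/, 0 /*following*/,
//                                           1 /*min_periods*/, agg,
//                                           rmm::mr::get_current_device_resource());
//
// Each row's window is clamped to its own group, so with following == 0 the first row of
// every group only sees itself.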
namespace {
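/// Returns true if the given data type is a timestamp type supported as the
/// range-frame (timestamp/orderby) column of a time-range rolling window.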
bool is_supported_range_frame_unit(cudf::data_type const& data_type)
{
auto id = data_type.id();
return id == cudf::type_id::TIMESTAMP_DAYS || id == cudf::type_id::TIMESTAMP_SECONDS ||
id == cudf::type_id::TIMESTAMP_MILLISECONDS ||
id == cudf::type_id::TIMESTAMP_MICROSECONDS || id == cudf::type_id::TIMESTAMP_NANOSECONDS;
}
/// Fetches multiplication factor to normalize window sizes, depending on the datatype of the
/// timestamp column. Used for time-based rolling-window operations. E.g. If the timestamp column is
/// in TIMESTAMP_SECONDS, and the window sizes are specified in DAYS, the window size needs to be
/// multiplied by `24*60*60`, before comparisons with the timestamps.
size_t multiplication_factor(cudf::data_type const& data_type)
{
// Assume timestamps.
switch (data_type.id()) {
case cudf::type_id::TIMESTAMP_DAYS: return 1L;
case cudf::type_id::TIMESTAMP_SECONDS: return 24L * 60 * 60;
case cudf::type_id::TIMESTAMP_MILLISECONDS: return 24L * 60 * 60 * 1000;
case cudf::type_id::TIMESTAMP_MICROSECONDS: return 24L * 60 * 60 * 1000 * 1000;
default:
CUDF_EXPECTS(data_type.id() == cudf::type_id::TIMESTAMP_NANOSECONDS,
"Unexpected data-type for timestamp-based rolling window operation!");
return 24L * 60 * 60 * 1000 * 1000 * 1000;
}
}
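/// Example: a 3-day window over a TIMESTAMP_SECONDS column is scaled to
/// 3 * 86400 == 259200 before being compared against the timestamps;
/// TIMESTAMP_DAYS needs no scaling (factor 1).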
/// Given a single, ungrouped timestamp column, return the indices corresponding
/// to the first null timestamp, and (one past) the last null timestamp.
/// The input column is sorted, with all null values clustered either
/// at the beginning of the column or at the end.
/// If no null values are found, null_begin and null_end are both 0.
std::tuple<size_type, size_type> get_null_bounds_for_timestamp_column(
column_view const& timestamp_column)
{
auto const num_rows = timestamp_column.size();
auto const num_nulls = timestamp_column.null_count();
if (num_nulls == num_rows || num_nulls == 0) {
// Short-circuit: All nulls, or no nulls.
return std::make_tuple(0, num_nulls);
}
auto const first_row_is_null = timestamp_column.null_count(0, 1) == 1;
return first_row_is_null ? std::make_tuple(0, num_nulls)
: std::make_tuple(num_rows - num_nulls, num_rows);
}
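/// Example: for a 10-row timestamp column sorted NULLS FIRST with 3 nulls, this returns (0, 3);
/// sorted NULLS LAST it returns (7, 10); with no nulls at all it returns (0, 0).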
/// Time-range window computation, with
/// 1. no grouping keys specified
/// 2. timestamps in ASCENDING order.
/// Treat as one single group.
template <typename TimeT>
std::unique_ptr<column> time_range_window_ASC(column_view const& input,
column_view const& timestamp_column,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
size_type nulls_begin_idx, nulls_end_idx;
std::tie(nulls_begin_idx, nulls_end_idx) = get_null_bounds_for_timestamp_column(timestamp_column);
auto preceding_calculator =
[nulls_begin_idx,
nulls_end_idx,
d_timestamps = timestamp_column.data<TimeT>(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
if (preceding_window_is_unbounded) {
return idx + 1; // Technically `idx - 0 + 1`,
// where 0 == Group start,
// and 1 accounts for the current row
}
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Must consider beginning of null-group as window start.
return idx - nulls_begin_idx + 1;
}
// timestamp[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
    // 1. NULLS FIRST ordering: Binary search starts at nulls_end_idx.
// 2. NO NULLS: Binary search starts at 0 (also nulls_end_idx).
// Otherwise, NULLS LAST ordering. Start at 0.
auto group_start = nulls_begin_idx == 0 ? nulls_end_idx : 0;
auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window;
return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq,
d_timestamps + group_start,
d_timestamps + idx,
lowest_timestamp_in_window)) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[nulls_begin_idx,
nulls_end_idx,
num_rows = input.size(),
d_timestamps = timestamp_column.data<TimeT>(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
if (following_window_is_unbounded) { return num_rows - idx - 1; }
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Window ends at the end of the null group.
return nulls_end_idx - idx - 1;
}
// timestamp[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search ends at num_rows.
// 2. NO NULLS: Binary search also ends at num_rows.
// Otherwise, NULLS LAST ordering. End at nulls_begin_idx.
auto group_end = nulls_begin_idx == 0 ? num_rows : nulls_begin_idx;
auto highest_timestamp_in_window = d_timestamps[idx] + following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + group_end,
highest_timestamp_in_window) -
(d_timestamps + idx)) -
1;
};
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
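// Worked example for the ASCENDING, ungrouped case above (illustrative): with timestamps
// {1, 2, 2, 3, 5} in the column's time unit, no nulls, preceding_window == 2 and
// following_window == 0, row 4 (t == 5) admits timestamps >= 5 - 2 == 3, so lower_bound
// lands on index 3 and the preceding window size is (4 - 3) + 1 == 2, i.e. the window is
// rows [3, 4].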
/// Given a timestamp column grouped as specified in group_offsets,
/// return the following two vectors:
/// 1. Vector with one entry per group, indicating the offset in the group
/// where the null values begin.
/// 2. Vector with one entry per group, indicating the offset in the group
/// where the null values end. (i.e. 1 past the last null.)
/// Each group in the input timestamp column must be sorted,
/// with null values clustered at either the start or the end of each group.
/// If a given group has no nulls, its (nulls_begin, nulls_end) == (group_start, group_start),
/// i.e. an empty null range.
std::tuple<rmm::device_vector<size_type>, rmm::device_vector<size_type>>
get_null_bounds_for_timestamp_column(column_view const& timestamp_column,
rmm::device_vector<size_type> const& group_offsets)
{
// For each group, the null values are themselves clustered
// at the beginning or the end of the group.
// These nulls cannot participate, except in their own window.
// If the input has n groups, group_offsets will have n+1 values.
// null_start and null_end should eventually have 1 entry per group.
auto null_start = rmm::device_vector<size_type>(group_offsets.begin(), group_offsets.end() - 1);
auto null_end = rmm::device_vector<size_type>(group_offsets.begin(), group_offsets.end() - 1);
if (timestamp_column.has_nulls()) {
auto p_timestamps_device_view = column_device_view::create(timestamp_column);
auto num_groups = group_offsets.size();
// Null timestamps exist. Find null bounds, per group.
thrust::for_each(
thrust::device,
thrust::make_counting_iterator(static_cast<size_type>(0)),
thrust::make_counting_iterator(static_cast<size_type>(num_groups)),
[d_timestamps = *p_timestamps_device_view,
d_group_offsets = group_offsets.data().get(),
d_null_start = null_start.data(),
d_null_end = null_end.data()] __device__(auto group_label) {
auto group_start = d_group_offsets[group_label];
auto group_end = d_group_offsets[group_label + 1];
auto first_element_is_null = d_timestamps.is_null_nocheck(group_start);
auto last_element_is_null = d_timestamps.is_null_nocheck(group_end - 1);
if (!first_element_is_null && !last_element_is_null) {
// Short circuit: No nulls.
d_null_start[group_label] = group_start;
d_null_end[group_label] = group_start;
} else if (first_element_is_null && last_element_is_null) {
// Short circuit: All nulls.
d_null_start[group_label] = group_start;
d_null_end[group_label] = group_end;
} else if (first_element_is_null) {
// NULLS FIRST.
d_null_start[group_label] = group_start;
d_null_end[group_label] = *thrust::partition_point(
thrust::seq,
thrust::make_counting_iterator(group_start),
thrust::make_counting_iterator(group_end),
[&d_timestamps] __device__(auto i) { return d_timestamps.is_null_nocheck(i); });
} else {
// NULLS LAST.
d_null_end[group_label] = group_end;
d_null_start[group_label] = *thrust::partition_point(
thrust::seq,
thrust::make_counting_iterator(group_start),
thrust::make_counting_iterator(group_end),
[&d_timestamps] __device__(auto i) { return d_timestamps.is_valid_nocheck(i); });
}
});
}
return std::make_tuple(std::move(null_start), std::move(null_end));
}
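// Example: with group_offsets == {0, 4, 10} and nulls only at rows 4-5 (NULLS FIRST within the
// second group), this returns null_start == {0, 4} and null_end == {0, 6}; the first group's
// null range is empty (both bounds equal its group start).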
// Time-range window computation, for timestamps in ASCENDING order.
template <typename TimeT>
std::unique_ptr<column> time_range_window_ASC(
column_view const& input,
column_view const& timestamp_column,
rmm::device_vector<cudf::size_type> const& group_offsets,
rmm::device_vector<cudf::size_type> const& group_labels,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
rmm::device_vector<size_type> null_start, null_end;
std::tie(null_start, null_end) =
get_null_bounds_for_timestamp_column(timestamp_column, group_offsets);
auto preceding_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (preceding_window_is_unbounded) { return idx - group_start + 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window starts at the start of the null group.
return idx - nulls_begin + 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search must begin at nulls_end.
// 2. NO NULLS: Search must begin at group_start (which also equals nulls_end.)
// Otherwise, NULLS LAST ordering. Search must start at nulls group_start.
auto search_start = nulls_begin == group_start ? nulls_end : group_start;
auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window;
return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq,
d_timestamps + search_start,
d_timestamps + idx,
lowest_timestamp_in_window)) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto group_end =
d_group_offsets[group_label +
                      1];  // Cannot fall off the end, since offsets are capped with `input.size()`.
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (following_window_is_unbounded) { return (group_end - idx) - 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window ends at the end of the null group.
return nulls_end - idx - 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search ends at group_end.
// 2. NO NULLS: Search ends at group_end.
// Otherwise, NULLS LAST ordering. Search ends at nulls_begin.
auto search_end = nulls_begin == group_start ? group_end : nulls_begin;
auto highest_timestamp_in_window = d_timestamps[idx] + following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + search_end,
highest_timestamp_in_window) -
(d_timestamps + idx)) -
1;
};
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
/// Time-range window computation, with
/// 1. no grouping keys specified
/// 2. timestamps in DESCENDING order.
/// Treat as one single group.
template <typename TimeT>
std::unique_ptr<column> time_range_window_DESC(column_view const& input,
column_view const& timestamp_column,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
size_type nulls_begin_idx, nulls_end_idx;
std::tie(nulls_begin_idx, nulls_end_idx) = get_null_bounds_for_timestamp_column(timestamp_column);
auto preceding_calculator =
[nulls_begin_idx,
nulls_end_idx,
d_timestamps = timestamp_column.data<TimeT>(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
if (preceding_window_is_unbounded) {
return idx + 1; // Technically `idx - 0 + 1`,
// where 0 == Group start,
// and 1 accounts for the current row
}
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Must consider beginning of null-group as window start.
return idx - nulls_begin_idx + 1;
}
// timestamp[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
    // 1. NULLS FIRST ordering: Binary search starts at nulls_end_idx.
// 2. NO NULLS: Binary search starts at 0 (also nulls_end_idx).
// Otherwise, NULLS LAST ordering. Start at 0.
auto group_start = nulls_begin_idx == 0 ? nulls_end_idx : 0;
auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window;
return ((d_timestamps + idx) -
thrust::lower_bound(thrust::seq,
d_timestamps + group_start,
d_timestamps + idx,
highest_timestamp_in_window,
thrust::greater<decltype(highest_timestamp_in_window)>())) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[nulls_begin_idx,
nulls_end_idx,
num_rows = input.size(),
d_timestamps = timestamp_column.data<TimeT>(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
if (following_window_is_unbounded) { return (num_rows - idx) - 1; }
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Window ends at the end of the null group.
return nulls_end_idx - idx - 1;
}
// timestamp[idx] not null. Search must exclude null group.
// If nulls_begin_idx = 0, either
// 1. NULLS FIRST ordering: Search ends at num_rows.
// 2. NO NULLS: Search also ends at num_rows.
// Otherwise, NULLS LAST ordering: End at nulls_begin_idx.
auto group_end = nulls_begin_idx == 0 ? num_rows : nulls_begin_idx;
auto lowest_timestamp_in_window = d_timestamps[idx] - following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + group_end,
lowest_timestamp_in_window,
thrust::greater<decltype(lowest_timestamp_in_window)>()) -
(d_timestamps + idx)) -
1;
};
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
// Time-range window computation, for timestamps in DESCENDING order.
template <typename TimeT>
std::unique_ptr<column> time_range_window_DESC(
column_view const& input,
column_view const& timestamp_column,
rmm::device_vector<cudf::size_type> const& group_offsets,
rmm::device_vector<cudf::size_type> const& group_labels,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
rmm::device_vector<size_type> null_start, null_end;
std::tie(null_start, null_end) =
get_null_bounds_for_timestamp_column(timestamp_column, group_offsets);
auto preceding_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (preceding_window_is_unbounded) { return (idx - group_start) + 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window starts at the start of the null group.
return idx - nulls_begin + 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search must begin at nulls_end.
// 2. NO NULLS: Search must begin at group_start (which also equals nulls_end.)
// Otherwise, NULLS LAST ordering. Search must start at nulls group_start.
auto search_start = nulls_begin == group_start ? nulls_end : group_start;
auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window;
return ((d_timestamps + idx) -
thrust::lower_bound(thrust::seq,
d_timestamps + search_start,
d_timestamps + idx,
highest_timestamp_in_window,
thrust::greater<decltype(highest_timestamp_in_window)>())) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto group_end = d_group_offsets[group_label + 1];
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (following_window_is_unbounded) { return (group_end - idx) - 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window ends at the end of the null group.
return nulls_end - idx - 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search ends at group_end.
// 2. NO NULLS: Search ends at group_end.
// Otherwise, NULLS LAST ordering. Search ends at nulls_begin.
auto search_end = nulls_begin == group_start ? group_end : nulls_begin;
auto lowest_timestamp_in_window = d_timestamps[idx] - following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + search_end,
lowest_timestamp_in_window,
thrust::greater<decltype(lowest_timestamp_in_window)>()) -
(d_timestamps + idx)) -
1;
};
if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) {
CUDF_FAIL("Time ranged rolling window does NOT (yet) support UDF.");
} else {
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
}
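/// Dispatches a time-range (timestamp-based) rolling window to the ASC/DESC and
/// grouped/ungrouped specializations above. Window sizes specified in days are first
/// scaled to the timestamp column's resolution via multiplication_factor().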
template <typename TimeT>
std::unique_ptr<column> grouped_time_range_rolling_window_impl(
column_view const& input,
column_view const& timestamp_column,
cudf::order const& timestamp_ordering,
rmm::device_vector<cudf::size_type> const& group_offsets,
rmm::device_vector<cudf::size_type> const& group_labels,
window_bounds preceding_window_in_days, // TODO: Consider taking offset-type as type_id. Assumes
// days for now.
window_bounds following_window_in_days,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
TimeT mult_factor{static_cast<TimeT>(multiplication_factor(timestamp_column.type()))};
if (timestamp_ordering == cudf::order::ASCENDING) {
return group_offsets.empty()
? time_range_window_ASC(input,
timestamp_column,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr)
: time_range_window_ASC(input,
timestamp_column,
group_offsets,
group_labels,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr);
} else {
return group_offsets.empty()
? time_range_window_DESC(input,
timestamp_column,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr)
: time_range_window_DESC(input,
timestamp_column,
group_offsets,
group_labels,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr);
}
}
} // namespace
std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
size_type preceding_window_in_days,
size_type following_window_in_days,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_time_range_rolling_window(group_keys,
timestamp_column,
timestamp_order,
input,
window_bounds::get(preceding_window_in_days),
window_bounds::get(following_window_in_days),
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
window_bounds preceding_window_in_days,
window_bounds following_window_in_days,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
"Size mismatch between group_keys and input vector.");
CUDF_EXPECTS((min_periods > 0), "min_periods must be positive");
using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper;
using index_vector = sort_groupby_helper::index_vector;
index_vector group_offsets, group_labels;
if (group_keys.num_columns() > 0) {
sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES};
group_offsets = helper.group_offsets();
group_labels = helper.group_labels();
}
// Assumes that `timestamp_column` is actually of a timestamp type.
CUDF_EXPECTS(is_supported_range_frame_unit(timestamp_column.type()),
"Unsupported data-type for `timestamp`-based rolling window operation!");
return timestamp_column.type().id() == cudf::type_id::TIMESTAMP_DAYS
? grouped_time_range_rolling_window_impl<int32_t>(input,
timestamp_column,
timestamp_order,
group_offsets,
group_labels,
preceding_window_in_days,
following_window_in_days,
min_periods,
aggr,
mr)
: grouped_time_range_rolling_window_impl<int64_t>(input,
timestamp_column,
timestamp_order,
group_offsets,
group_labels,
preceding_window_in_days,
following_window_in_days,
min_periods,
aggr,
mr);
}
} // namespace cudf
// 7c86b1d17affa49de0769f9d9faabdd726ec6fa2.cu
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rolling/jit/code/code.h>
#include <rolling/rolling_detail.hpp>
#include <rolling/rolling_jit_detail.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby/sort_helper.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/rolling.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <jit/launcher.h>
#include <jit/parser.h>
#include <jit/type.h>
#include <jit/bit.hpp.jit>
#include <jit/rolling_jit_detail.hpp.jit>
#include <jit/types.hpp.jit>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <thrust/binary_search.h>
#include <thrust/detail/execution_policy.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <memory>
namespace cudf {
namespace detail {
namespace { // anonymous
/**
 * @brief Performs the COUNT_VALID operation for the window; the output is valid
 *        only if the window spans at least `min_periods` rows. Returns true if
 *        the output was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<op == aggregation::COUNT_VALID>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
bool output_is_valid = ((end_index - start_index) >= min_periods);
if (output_is_valid) {
if (!has_nulls) {
count = end_index - start_index;
} else {
count = thrust::count_if(thrust::seq,
thrust::make_counting_iterator(start_index),
thrust::make_counting_iterator(end_index),
[&input](auto i) { return input.is_valid_nocheck(i); });
}
output.element<OutputType>(current_index) = count;
}
return output_is_valid;
}
/**
 * @brief Performs the COUNT_ALL operation for the window; the output is valid
 *        only if the window spans at least `min_periods` rows. Returns true if
 *        the output was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<op == aggregation::COUNT_ALL>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
cudf::size_type count = end_index - start_index;
bool output_is_valid = (count >= min_periods);
output.element<OutputType>(current_index) = count;
return output_is_valid;
}
/**
 * @brief Calculates the row number within [start_index, end_index); the output is
 *        valid only if the window spans at least `min_periods` rows.
 *        Returns true if the output was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<op == aggregation::ROW_NUMBER>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
bool output_is_valid = ((end_index - start_index) >= min_periods);
output.element<OutputType>(current_index) = ((current_index - start_index) + 1);
return output_is_valid;
}
/**
* @brief LEAD(N): Returns the row from the input column, at the specified offset past the
* current row.
* If the offset crosses the grouping boundary or column boundary for
* a given row, a "default" value is returned. The "default" value is null, by default.
*
* E.g. Consider an input column with the following values and grouping:
* [10, 11, 12, 13, 20, 21, 22, 23]
* <------G1-----> <------G2------>
*
* LEAD(input_col, 1) yields:
* [11, 12, 13, null, 21, 22, 23, null]
*
* LEAD(input_col, 1, 99) (where 99 indicates the default) yields:
* [11, 12, 13, 99, 21, 22, 23, 99]
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls>
std::enable_if_t<(op == aggregation::LEAD) && (cudf::is_fixed_width<InputType>()), bool> __device__
process_rolling_window(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods,
agg_op device_agg_op)
{
// Offsets have already been normalized.
auto row_offset = device_agg_op.row_offset;
// Check if row is invalid.
if (row_offset > (end_index - current_index - 1)) {
// Invalid row marked. Use default value, if available.
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; }
output.element<OutputType>(current_index) = default_outputs.element<OutputType>(current_index);
return true;
}
// Not an invalid row.
auto index = current_index + row_offset;
auto is_null = input.is_null(index);
if (!is_null) { output.element<OutputType>(current_index) = input.element<InputType>(index); }
return !is_null;
}
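// Illustrative sketch of the bounds check above: with end_index == 4 (one past
// the last row of the group), current_index == 3 and row_offset == 1, the test
// 1 > (4 - 3 - 1) == 0 holds, so the default value (or null) is produced,
// matching the trailing null in the LEAD(input_col, 1) example documented above.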
/**
* @brief LAG(N): returns the row from the input column at the specified offset preceding
* the current row.
* If the offset crosses the grouping boundary or column boundary for
* a given row, a "default" value is returned. The "default" value is null, by default.
*
* E.g. Consider an input column with the following values and grouping:
* [10, 11, 12, 13, 20, 21, 22, 23]
* <------G1-----> <------G2------>
*
* LAG(input_col, 2) yields:
* [null, null, 10, 11, null, null, 20, 21]
* LAG(input_col, 2, 99) yields:
* [99, 99, 10, 11, 99, 99, 20, 21]
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls>
std::enable_if_t<(op == aggregation::LAG) && (cudf::is_fixed_width<InputType>()), bool> __device__
process_rolling_window(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods,
agg_op device_agg_op)
{
// Offsets have already been normalized.
auto row_offset = device_agg_op.row_offset;
// Check if row is invalid.
if (row_offset > (current_index - start_index)) {
// Invalid row marked. Use default value, if available.
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; }
output.element<OutputType>(current_index) = default_outputs.element<OutputType>(current_index);
return true;
}
// Not an invalid row.
auto index = current_index - row_offset;
auto is_null = input.is_null(index);
if (!is_null) { output.element<OutputType>(current_index) = input.element<InputType>(index); }
return !is_null;
}
/**
 * @brief Used only for the `string_view` type: computes ARGMIN/ARGMAX indices,
 *        which are later gathered to produce the MIN/MAX results. Rows that fail
 *        the `min_periods` check are marked with -1 in the gather map.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<(op == aggregation::ARGMIN or op == aggregation::ARGMAX) and
std::is_same<InputType, cudf::string_view>::value>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
InputType val = agg_op::template identity<InputType>();
OutputType val_index = (op == aggregation::ARGMIN) ? ARGMIN_SENTINEL : ARGMAX_SENTINEL;
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
InputType element = input.element<InputType>(j);
val = agg_op{}(element, val);
if (val == element) { val_index = j; }
count++;
}
}
bool output_is_valid = (count >= min_periods);
// -1 will help identify null elements while gathering for Min and Max;
// such rows are nullified during the gather step, so the stored value doesn't matter.
output.element<OutputType>(current_index) = (output_is_valid) ? val_index : -1;
// The gather map entry itself is never null, so
// always return true
return true;
}
/**
 * @brief Operates only on fixed-width types; returns true if the
 *        operation was valid, else false.
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
bool has_nulls,
std::enable_if_t<!std::is_same<InputType, cudf::string_view>::value and
!(op == aggregation::COUNT_VALID || op == aggregation::COUNT_ALL ||
op == aggregation::ROW_NUMBER || op == aggregation::LEAD ||
op == aggregation::LAG)>* = nullptr>
bool __device__ process_rolling_window(column_device_view input,
column_device_view ignored_default_outputs,
mutable_column_device_view output,
size_type start_index,
size_type end_index,
size_type current_index,
size_type min_periods)
{
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
OutputType val = agg_op::template identity<OutputType>();
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
OutputType element = input.element<InputType>(j);
val = agg_op{}(element, val);
count++;
}
}
bool output_is_valid = (count >= min_periods);
// store the output value, one per thread
cudf::detail::rolling_store_output_functor<OutputType, op == aggregation::MEAN>{}(
output.element<OutputType>(current_index), val, count);
return output_is_valid;
}
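// Illustrative sketch: with agg_op == DeviceSum over a window holding the values
// {1, 2, 4} and no nulls, val accumulates to 7 and count to 3. For
// op == aggregation::MEAN, the rolling_store_output_functor specialization is
// expected to divide val by count before writing the output element.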
/**
* @brief Computes the rolling window function
*
* @tparam InputType Datatype of `input`
* @tparam OutputType Datatype of `output`
* @tparam agg_op A functor that defines the aggregation operation
* @tparam op The aggregation operator (enum value)
* @tparam block_size CUDA block size for the kernel
* @tparam has_nulls true if the input column has nulls
* @tparam PrecedingWindowIterator iterator type (inferred)
* @tparam FollowingWindowIterator iterator type (inferred)
* @param input Input column device view
* @param output Output column device view
 * @param preceding_window_begin[in] Rolling window size iterator, accumulates from
 *                                   in_col[i-preceding_window+1] to in_col[i] inclusive
 *                                   (the preceding window counts the current row)
* @param following_window_begin[in] Rolling window size iterator in the forward
* direction, accumulates from in_col[i] to
* in_col[i+following_window] inclusive
* @param min_periods[in] Minimum number of observations in window required to
* have a value, otherwise 0 is stored in the valid bit mask
*/
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
int block_size,
bool has_nulls,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
__launch_bounds__(block_size) __global__
void gpu_rolling(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type* __restrict__ output_valid_count,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods)
{
size_type i = blockIdx.x * block_size + threadIdx.x;
size_type stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, i < input.size());
while (i < input.size()) {
size_type preceding_window = preceding_window_begin[i];
size_type following_window = following_window_begin[i];
// compute bounds
size_type start = min(input.size(), max(0, i - preceding_window + 1));
size_type end = min(input.size(), max(0, i + following_window + 1));
size_type start_index = min(start, end);
size_type end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
volatile bool output_is_valid = false;
output_is_valid = process_rolling_window<InputType, OutputType, agg_op, op, has_nulls>(
input, default_outputs, output, start_index, end_index, i, min_periods);
// set the mask
cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)};
// only one thread writes the mask
if (0 == threadIdx.x % cudf::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
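// Illustrative sketch of the bounds arithmetic in gpu_rolling: for i == 5,
// preceding_window == 3 and following_window == 1 on a 100-row column,
// start == max(0, 5 - 3 + 1) == 3 and end == min(100, 5 + 1 + 1) == 7, so rows
// [3, 7) are aggregated: the current row, the two rows preceding it, and one
// row following it.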
template <typename InputType,
typename OutputType,
typename agg_op,
aggregation::Kind op,
int block_size,
bool has_nulls,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
__launch_bounds__(block_size) __global__
void gpu_rolling(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type* __restrict__ output_valid_count,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
agg_op device_agg_op)
{
size_type i = blockIdx.x * block_size + threadIdx.x;
size_type stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, i < input.size());
while (i < input.size()) {
size_type preceding_window = preceding_window_begin[i];
size_type following_window = following_window_begin[i];
// compute bounds
size_type start = min(input.size(), max(0, i - preceding_window + 1));
size_type end = min(input.size(), max(0, i + following_window + 1));
size_type start_index = min(start, end);
size_type end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
volatile bool output_is_valid = false;
output_is_valid = process_rolling_window<InputType, OutputType, agg_op, op, has_nulls>(
input, default_outputs, output, start_index, end_index, i, min_periods, device_agg_op);
// set the mask
cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)};
// only one thread writes the mask
if (0 == threadIdx.x % cudf::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
template <typename InputType>
struct rolling_window_launcher {
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
size_type kernel_launcher(column_view const& input,
column_view const& default_outputs,
mutable_column_view& output,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream)
{
constexpr cudf::size_type block_size = 256;
cudf::detail::grid_1d grid(input.size(), block_size);
auto input_device_view = column_device_view::create(input, stream);
auto output_device_view = mutable_column_device_view::create(output, stream);
auto default_outputs_device_view = column_device_view::create(default_outputs, stream);
rmm::device_scalar<size_type> device_valid_count{0, stream};
if (input.has_nulls()) {
gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, true>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(*input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods);
} else {
gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, false>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(*input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods);
}
size_type valid_count = device_valid_count.value(stream);
// check the stream for debugging
CHECK_CUDA(stream.value());
return valid_count;
}
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
size_type kernel_launcher(column_view const& input,
column_view const& default_outputs,
mutable_column_view& output,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
agg_op const& device_agg_op,
rmm::cuda_stream_view stream)
{
constexpr cudf::size_type block_size = 256;
cudf::detail::grid_1d grid(input.size(), block_size);
auto input_device_view = column_device_view::create(input, stream);
auto output_device_view = mutable_column_device_view::create(output, stream);
auto default_outputs_device_view = column_device_view::create(default_outputs, stream);
rmm::device_scalar<size_type> device_valid_count{0, stream};
if (input.has_nulls()) {
gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, true>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(*input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods,
device_agg_op);
} else {
gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, false>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(*input_device_view,
*default_outputs_device_view,
*output_device_view,
device_valid_count.data(),
preceding_window_begin,
following_window_begin,
min_periods,
device_agg_op);
}
size_type valid_count = device_valid_count.value(stream);
// check the stream for debugging
CHECK_CUDA(stream.value());
return valid_count;
}
// This launch is only for fixed-width columns with valid aggregation options:
// numeric: All
// timestamp: MIN, MAX, COUNT_VALID, COUNT_ALL, ROW_NUMBER
// string, dictionary, list : COUNT_VALID, COUNT_ALL, ROW_NUMBER
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<cudf::detail::is_rolling_supported<T, agg_op, op>() and
!cudf::detail::is_rolling_string_specialization<T, agg_op, op>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto output = make_fixed_width_column(
target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr);
cudf::mutable_column_view output_view = output->mutable_view();
auto valid_count =
kernel_launcher<T, agg_op, op, PrecedingWindowIterator, FollowingWindowIterator>(
input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream);
output->set_null_count(output->size() - valid_count);
return output;
}
// This launch is only for string specializations
// string: MIN, MAX
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<cudf::detail::is_rolling_string_specialization<T, agg_op, op>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto output = make_numeric_column(cudf::data_type{cudf::type_to_id<size_type>()},
input.size(),
cudf::mask_state::UNINITIALIZED,
stream,
mr);
cudf::mutable_column_view output_view = output->mutable_view();
// The agg_op and aggregation::Kind are passed together as compile-time constants to keep
// them paired; otherwise the compiler tries invalid agg_op/Kind combinations and fails.
if (op == aggregation::MIN) {
kernel_launcher<T,
DeviceMin,
aggregation::ARGMIN,
PrecedingWindowIterator,
FollowingWindowIterator>(input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream);
} else if (op == aggregation::MAX) {
kernel_launcher<T,
DeviceMax,
aggregation::ARGMAX,
PrecedingWindowIterator,
FollowingWindowIterator>(input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream);
} else {
CUDF_FAIL("MIN and MAX are the only supported aggregation types for string columns");
}
// The rows that represent null elements will have negative values in the gather map,
// and that's why out_of_bounds_policy::NULLIFY is used for the gather below.
auto output_table = detail::gather(table_view{{input}},
output->view(),
cudf::out_of_bounds_policy::NULLIFY,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::make_unique<cudf::column>(std::move(output_table->get_column(0)));
}
// Deals with invalid column and/or aggregation options
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!cudf::detail::is_rolling_supported<T, agg_op, op>() and
!cudf::detail::is_rolling_string_specialization<T, agg_op, op>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Aggregation operator and/or input type combination is invalid");
}
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<cudf::is_fixed_width<T>() and
(op == aggregation::LEAD || op == aggregation::LAG),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
agg_op const& device_agg_op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS(default_outputs.type().id() == input.type().id(),
"Defaults column type must match input column."); // Because LEAD/LAG.
// For LEAD(0)/LAG(0), no computation need be performed.
// Return copy of input.
if (0 == static_cast<cudf::detail::lead_lag_aggregation*>(agg.get())->row_offset) {
return std::make_unique<column>(input, stream, mr);
}
auto output = make_fixed_width_column(
target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr);
cudf::mutable_column_view output_view = output->mutable_view();
auto valid_count =
kernel_launcher<T, agg_op, op, PrecedingWindowIterator, FollowingWindowIterator>(
input,
default_outputs,
output_view,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
device_agg_op,
stream);
output->set_null_count(output->size() - valid_count);
return output;
}
// Deals with invalid column and/or aggregation options
template <typename T,
typename agg_op,
aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!(op == aggregation::LEAD || op == aggregation::LAG) ||
!cudf::is_fixed_width<T>(),
std::unique_ptr<column>>
launch(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
agg_op device_agg_op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL(
"Aggregation operator and/or input type combination is invalid: "
"LEAD/LAG supported only on fixed-width types");
}
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!(op == aggregation::MEAN || op == aggregation::LEAD || op == aggregation::LAG),
std::unique_ptr<column>>
operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(default_outputs.is_empty(),
"Only LEAD/LAG window functions support default values.");
return launch<InputType,
typename corresponding_operator<op>::type,
op,
PrecedingWindowIterator,
FollowingWindowIterator>(input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
// This variant is just to handle mean
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<(op == aggregation::MEAN), std::unique_ptr<column>> operator()(
column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return launch<InputType, cudf::DeviceSum, op, PrecedingWindowIterator, FollowingWindowIterator>(
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<(op == aggregation::LEAD || op == aggregation::LAG), std::unique_ptr<column>>
operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return launch<InputType,
cudf::DeviceLeadLag,
op,
PrecedingWindowIterator,
FollowingWindowIterator>(
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
cudf::DeviceLeadLag{static_cast<cudf::detail::lead_lag_aggregation*>(agg.get())->row_offset},
stream,
mr);
}
};
struct dispatch_rolling {
template <typename T, typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return aggregation_dispatcher(agg->kind,
rolling_window_launcher<T>{},
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
};
} // namespace
// Applies a user-defined rolling window function to the values in a column.
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> rolling_window_udf(column_view const& input,
PrecedingWindowIterator preceding_window,
std::string const& preceding_window_str,
FollowingWindowIterator following_window,
std::string const& following_window_str,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
if (input.has_nulls())
CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls.");
min_periods = std::max(min_periods, 0);
auto udf_agg = static_cast<udf_aggregation*>(agg.get());
std::string hash = "prog_rolling." + std::to_string(std::hash<std::string>{}(udf_agg->_source));
std::string cuda_source;
switch (udf_agg->kind) {
case aggregation::Kind::PTX:
cuda_source = cudf::rolling::jit::code::kernel_headers;
cuda_source +=
cudf::jit::parse_single_function_ptx(udf_agg->_source,
udf_agg->_function_name,
cudf::jit::get_type_name(udf_agg->_output_type),
{0, 5}); // args 0 and 5 are pointers.
cuda_source += cudf::rolling::jit::code::kernel;
break;
case aggregation::Kind::CUDA:
cuda_source = cudf::rolling::jit::code::kernel_headers;
cuda_source +=
cudf::jit::parse_single_function_cuda(udf_agg->_source, udf_agg->_function_name);
cuda_source += cudf::rolling::jit::code::kernel;
break;
default: CUDF_FAIL("Unsupported UDF type.");
}
std::unique_ptr<column> output = make_numeric_column(
udf_agg->_output_type, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
auto output_view = output->mutable_view();
rmm::device_scalar<size_type> device_valid_count{0, stream};
const std::vector<std::string> compiler_flags{"-std=c++14",
// Have jitify prune unused global variables
"-remove-unused-globals",
// suppress all NVRTC warnings
"-w"};
// Launch the jitify kernel
cudf::jit::launcher(hash,
cuda_source,
{cudf_types_hpp,
cudf_utilities_bit_hpp,
cudf::rolling::jit::code::operation_h,
___src_rolling_rolling_jit_detail_hpp},
compiler_flags,
nullptr,
stream)
.set_kernel_inst("gpu_rolling_new", // name of the kernel we are launching
{cudf::jit::get_type_name(input.type()), // list of template arguments
cudf::jit::get_type_name(output->type()),
udf_agg->_operator_name,
preceding_window_str.c_str(),
following_window_str.c_str()})
.launch(input.size(),
cudf::jit::get_data_ptr(input),
input.null_mask(),
cudf::jit::get_data_ptr(output_view),
output_view.null_mask(),
device_valid_count.data(),
preceding_window,
following_window,
min_periods);
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CHECK_CUDA(stream.value());
return output;
}
/**
* @copydoc cudf::rolling_window(column_view const& input,
* PrecedingWindowIterator preceding_window_begin,
* FollowingWindowIterator following_window_begin,
* size_type min_periods,
* std::unique_ptr<aggregation> const& agg,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
min_periods = std::max(min_periods, 0);
return cudf::type_dispatcher(input.type(),
dispatch_rolling{},
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
} // namespace detail
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
return rolling_window(
input, empty_like(input)->view(), preceding_window, following_window, min_periods, agg, mr);
}
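// Usage sketch (illustrative, not part of this file), assuming the aggregation
// factory cudf::make_sum_aggregation() from cudf/aggregation.hpp and the
// defaulted memory-resource argument declared in the public rolling header:
//
//   auto rolled = cudf::rolling_window(col,
//                                      /*preceding_window=*/3,
//                                      /*following_window=*/1,
//                                      /*min_periods=*/1,
//                                      cudf::make_sum_aggregation());
//
// For each row this sums the row itself, up to two preceding rows and one
// following row; rows whose window holds fewer than min_periods observations
// are null in the result.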
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
"Defaults column must be either empty or have as many rows as the input column.");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::detail::rolling_window_udf(input,
preceding_window,
"cudf::size_type",
following_window,
"cudf::size_type",
min_periods,
agg,
rmm::cuda_stream_default,
mr);
} else {
auto preceding_window_begin = thrust::make_constant_iterator(preceding_window);
auto following_window_begin = thrust::make_constant_iterator(following_window);
return cudf::detail::rolling_window(input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
rmm::cuda_stream_default,
mr);
}
}
// Applies a variable-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (preceding_window.is_empty() || following_window.is_empty() || input.is_empty())
return empty_like(input);
CUDF_EXPECTS(preceding_window.type().id() == type_id::INT32 &&
following_window.type().id() == type_id::INT32,
"preceding_window/following_window must have type_id::INT32 type");
CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(),
"preceding_window/following_window size must match input size");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::detail::rolling_window_udf(input,
preceding_window.begin<size_type>(),
"cudf::size_type*",
following_window.begin<size_type>(),
"cudf::size_type*",
min_periods,
agg,
rmm::cuda_stream_default,
mr);
} else {
return cudf::detail::rolling_window(input,
empty_like(input)->view(),
preceding_window.begin<size_type>(),
following_window.begin<size_type>(),
min_periods,
agg,
rmm::cuda_stream_default,
mr);
}
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
window_bounds::get(preceding_window),
window_bounds::get(following_window),
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
window_bounds preceding_window,
window_bounds following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
empty_like(input)->view(),
preceding_window,
following_window,
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
default_outputs,
window_bounds::get(preceding_window),
window_bounds::get(following_window),
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
window_bounds preceding_window_bounds,
window_bounds following_window_bounds,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
"Size mismatch between group_keys and input vector.");
CUDF_EXPECTS((min_periods > 0), "min_periods must be positive");
CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
"Defaults column must be either empty or have as many rows as the input column.");
auto const preceding_window = preceding_window_bounds.value;
auto const following_window = following_window_bounds.value;
if (group_keys.num_columns() == 0) {
// No Groupby columns specified. Treat as one big group.
return rolling_window(
input, default_outputs, preceding_window, following_window, min_periods, aggr, mr);
}
using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper;
sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES};
auto group_offsets{helper.group_offsets()};
auto const& group_labels{helper.group_labels()};
// `group_offsets` are interpreted in adjacent pairs, each pair representing the offsets
// of the first, and one past the last elements in a group.
//
// If `group_offsets` is not empty, it must contain at least two offsets:
// a. 0, indicating the first element in `input`
// b. input.size(), indicating one past the last element in `input`.
//
// Thus, for an input of 1000 rows,
// 0. [] indicates a single group, spanning the entire column.
// 1. [10] is invalid.
// 2. [0, 1000] indicates a single group, spanning the entire column (thus, equivalent to no
// groups.)
// 3. [0, 500, 1000] indicates two equal-sized groups: [0,500), and [500,1000).
assert(group_offsets.size() >= 2 && group_offsets[0] == 0 &&
group_offsets[group_offsets.size() - 1] == input.size() &&
"Must have at least one group.");
auto preceding_calculator = [d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
preceding_window] __device__(size_type idx) {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
return thrust::minimum<size_type>{}(preceding_window,
idx - group_start + 1); // Preceding includes current row.
};
auto following_calculator = [d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
following_window] __device__(size_type idx) {
auto group_label = d_group_labels[idx];
auto group_end =
d_group_offsets[group_label +
1]; // Cannot fall off the end, since offsets is capped with `input.size()`.
return thrust::minimum<size_type>{}(following_window, (group_end - 1) - idx);
};
if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) {
cudf::detail::preceding_window_wrapper grouped_preceding_window{
group_offsets.data().get(), group_labels.data().get(), preceding_window};
cudf::detail::following_window_wrapper grouped_following_window{
group_offsets.data().get(), group_labels.data().get(), following_window};
return cudf::detail::rolling_window_udf(input,
grouped_preceding_window,
"cudf::detail::preceding_window_wrapper",
grouped_following_window,
"cudf::detail::following_window_wrapper",
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
} else {
return cudf::detail::rolling_window(
input,
default_outputs,
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
}
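// Illustrative sketch of the grouped window clipping above: for a group spanning
// rows [10, 20) with preceding_window == 5, the row at idx == 12 gets
// min(5, 12 - 10 + 1) == 3, so the window never reaches before the start of the
// row's group; following_window is clipped against (group_end - 1) - idx in the
// same way.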
namespace {
bool is_supported_range_frame_unit(cudf::data_type const& data_type)
{
auto id = data_type.id();
return id == cudf::type_id::TIMESTAMP_DAYS || id == cudf::type_id::TIMESTAMP_SECONDS ||
id == cudf::type_id::TIMESTAMP_MILLISECONDS ||
id == cudf::type_id::TIMESTAMP_MICROSECONDS || id == cudf::type_id::TIMESTAMP_NANOSECONDS;
}
/// Fetches multiplication factor to normalize window sizes, depending on the datatype of the
/// timestamp column. Used for time-based rolling-window operations. E.g. If the timestamp column is
/// in TIMESTAMP_SECONDS, and the window sizes are specified in DAYS, the window size needs to be
/// multiplied by `24*60*60`, before comparisons with the timestamps.
size_t multiplication_factor(cudf::data_type const& data_type)
{
// Assume timestamps.
switch (data_type.id()) {
case cudf::type_id::TIMESTAMP_DAYS: return 1L;
case cudf::type_id::TIMESTAMP_SECONDS: return 24L * 60 * 60;
case cudf::type_id::TIMESTAMP_MILLISECONDS: return 24L * 60 * 60 * 1000;
case cudf::type_id::TIMESTAMP_MICROSECONDS: return 24L * 60 * 60 * 1000 * 1000;
default:
CUDF_EXPECTS(data_type.id() == cudf::type_id::TIMESTAMP_NANOSECONDS,
"Unexpected data-type for timestamp-based rolling window operation!");
return 24L * 60 * 60 * 1000 * 1000 * 1000;
}
}
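// Illustrative sketch: for a TIMESTAMP_SECONDS column, a window specified as
// 2 days is normalized to 2 * 24 * 60 * 60 == 172800 seconds before being
// compared against the timestamp values.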
/// Given a single, ungrouped timestamp column, return the indices corresponding
/// to the first null timestamp, and (one past) the last null timestamp.
/// The input column is sorted, with all null values clustered either
/// at the beginning of the column or at the end.
/// If no null values are found, null_begin and null_end are both 0.
std::tuple<size_type, size_type> get_null_bounds_for_timestamp_column(
column_view const& timestamp_column)
{
auto const num_rows = timestamp_column.size();
auto const num_nulls = timestamp_column.null_count();
if (num_nulls == num_rows || num_nulls == 0) {
// Short-circuit: All nulls, or no nulls.
return std::make_tuple(0, num_nulls);
}
auto const first_row_is_null = timestamp_column.null_count(0, 1) == 1;
return first_row_is_null ? std::make_tuple(0, num_nulls)
: std::make_tuple(num_rows - num_nulls, num_rows);
}
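// Illustrative sketch: for a sorted 8-row timestamp column with 3 nulls placed
// first (NULLS FIRST) this returns (0, 3); with the 3 nulls placed last it
// returns (5, 8); with no nulls at all it returns (0, 0).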
/// Time-range window computation, with
/// 1. no grouping keys specified
/// 2. timestamps in ASCENDING order.
/// Treat as one single group.
template <typename TimeT>
std::unique_ptr<column> time_range_window_ASC(column_view const& input,
column_view const& timestamp_column,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
size_type nulls_begin_idx, nulls_end_idx;
std::tie(nulls_begin_idx, nulls_end_idx) = get_null_bounds_for_timestamp_column(timestamp_column);
auto preceding_calculator =
[nulls_begin_idx,
nulls_end_idx,
d_timestamps = timestamp_column.data<TimeT>(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
if (preceding_window_is_unbounded) {
return idx + 1; // Technically `idx - 0 + 1`,
// where 0 == Group start,
// and 1 accounts for the current row
}
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Must consider beginning of null-group as window start.
return idx - nulls_begin_idx + 1;
}
// timestamp[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search starts at nulls_end_idx.
// 2. NO NULLS: Binary search starts at 0 (also nulls_end_idx).
// Otherwise, NULLS LAST ordering. Start at 0.
auto group_start = nulls_begin_idx == 0 ? nulls_end_idx : 0;
auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window;
return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq,
d_timestamps + group_start,
d_timestamps + idx,
lowest_timestamp_in_window)) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[nulls_begin_idx,
nulls_end_idx,
num_rows = input.size(),
d_timestamps = timestamp_column.data<TimeT>(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
if (following_window_is_unbounded) { return num_rows - idx - 1; }
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Window ends at the end of the null group.
return nulls_end_idx - idx - 1;
}
// timestamp[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search ends at num_rows.
// 2. NO NULLS: Binary search also ends at num_rows.
// Otherwise, NULLS LAST ordering. End at nulls_begin_idx.
auto group_end = nulls_begin_idx == 0 ? num_rows : nulls_begin_idx;
auto highest_timestamp_in_window = d_timestamps[idx] + following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + group_end,
highest_timestamp_in_window) -
(d_timestamps + idx)) -
1;
};
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
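// Illustrative sketch of the preceding computation above: for ascending
// timestamps {1, 2, 5, 6, 9} with no nulls, idx == 3 (timestamp 6) and
// preceding_window == 4, thrust::lower_bound finds the first timestamp >= 2 at
// position 1, so the preceding window size is (3 - 1) + 1 == 3 rows
// (timestamps 2, 5 and 6).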
/// Given a timestamp column grouped as specified in group_offsets,
/// return the following two vectors:
/// 1. Vector with one entry per group, indicating the offset in the group
/// where the null values begin.
/// 2. Vector with one entry per group, indicating the offset in the group
/// where the null values end. (i.e. 1 past the last null.)
/// Each group in the input timestamp column must be sorted,
/// with null values clustered at either the start or the end of each group.
/// If there are no nulls for any given group, (nulls_begin, nulls_end) == (0,0).
std::tuple<rmm::device_vector<size_type>, rmm::device_vector<size_type>>
get_null_bounds_for_timestamp_column(column_view const& timestamp_column,
rmm::device_vector<size_type> const& group_offsets)
{
// For each group, the null values are themselves clustered
// at the beginning or the end of the group.
// These nulls cannot participate, except in their own window.
// If the input has n groups, group_offsets will have n+1 values.
// null_start and null_end should eventually have 1 entry per group.
auto null_start = rmm::device_vector<size_type>(group_offsets.begin(), group_offsets.end() - 1);
auto null_end = rmm::device_vector<size_type>(group_offsets.begin(), group_offsets.end() - 1);
if (timestamp_column.has_nulls()) {
auto p_timestamps_device_view = column_device_view::create(timestamp_column);
auto num_groups = group_offsets.size() - 1;  // group_offsets has one entry more than the group count
// Null timestamps exist. Find null bounds, per group.
thrust::for_each(
thrust::device,
thrust::make_counting_iterator(static_cast<size_type>(0)),
thrust::make_counting_iterator(static_cast<size_type>(num_groups)),
[d_timestamps = *p_timestamps_device_view,
d_group_offsets = group_offsets.data().get(),
d_null_start = null_start.data(),
d_null_end = null_end.data()] __device__(auto group_label) {
auto group_start = d_group_offsets[group_label];
auto group_end = d_group_offsets[group_label + 1];
auto first_element_is_null = d_timestamps.is_null_nocheck(group_start);
auto last_element_is_null = d_timestamps.is_null_nocheck(group_end - 1);
if (!first_element_is_null && !last_element_is_null) {
// Short circuit: No nulls.
d_null_start[group_label] = group_start;
d_null_end[group_label] = group_start;
} else if (first_element_is_null && last_element_is_null) {
// Short circuit: All nulls.
d_null_start[group_label] = group_start;
d_null_end[group_label] = group_end;
} else if (first_element_is_null) {
// NULLS FIRST.
d_null_start[group_label] = group_start;
d_null_end[group_label] = *thrust::partition_point(
thrust::seq,
thrust::make_counting_iterator(group_start),
thrust::make_counting_iterator(group_end),
[&d_timestamps] __device__(auto i) { return d_timestamps.is_null_nocheck(i); });
} else {
// NULLS LAST.
d_null_end[group_label] = group_end;
d_null_start[group_label] = *thrust::partition_point(
thrust::seq,
thrust::make_counting_iterator(group_start),
thrust::make_counting_iterator(group_end),
[&d_timestamps] __device__(auto i) { return d_timestamps.is_valid_nocheck(i); });
}
});
}
return std::make_tuple(std::move(null_start), std::move(null_end));
}
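// Illustrative sketch: for group_offsets == {0, 4, 8} where only the second
// group begins with two nulls, the result is null_start == {0, 4} and
// null_end == {0, 6}: group 0 reports an empty null range at its own start,
// while group 1 reports nulls in rows [4, 6).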
// Time-range window computation, for timestamps in ASCENDING order.
template <typename TimeT>
std::unique_ptr<column> time_range_window_ASC(
column_view const& input,
column_view const& timestamp_column,
rmm::device_vector<cudf::size_type> const& group_offsets,
rmm::device_vector<cudf::size_type> const& group_labels,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
rmm::device_vector<size_type> null_start, null_end;
std::tie(null_start, null_end) =
get_null_bounds_for_timestamp_column(timestamp_column, group_offsets);
auto preceding_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (preceding_window_is_unbounded) { return idx - group_start + 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window starts at the start of the null group.
return idx - nulls_begin + 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search must begin at nulls_end.
// 2. NO NULLS: Search must begin at group_start (which also equals nulls_end.)
// Otherwise, NULLS LAST ordering. Search must start at group_start.
auto search_start = nulls_begin == group_start ? nulls_end : group_start;
auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window;
return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq,
d_timestamps + search_start,
d_timestamps + idx,
lowest_timestamp_in_window)) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto group_end =
d_group_offsets[group_label +
1]; // Cannot fall off the end, since offsets is capped with `input.size()`.
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (following_window_is_unbounded) { return (group_end - idx) - 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window ends at the end of the null group.
return nulls_end - idx - 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search ends at group_end.
// 2. NO NULLS: Search ends at group_end.
// Otherwise, NULLS LAST ordering. Search ends at nulls_begin.
auto search_end = nulls_begin == group_start ? group_end : nulls_begin;
auto highest_timestamp_in_window = d_timestamps[idx] + following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + search_end,
highest_timestamp_in_window) -
(d_timestamps + idx)) -
1;
};
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
/// Time-range window computation, with
/// 1. no grouping keys specified
/// 2. timestamps in DESCENDING order.
/// Treat as one single group.
template <typename TimeT>
std::unique_ptr<column> time_range_window_DESC(column_view const& input,
column_view const& timestamp_column,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
size_type nulls_begin_idx, nulls_end_idx;
std::tie(nulls_begin_idx, nulls_end_idx) = get_null_bounds_for_timestamp_column(timestamp_column);
auto preceding_calculator =
[nulls_begin_idx,
nulls_end_idx,
d_timestamps = timestamp_column.data<TimeT>(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
if (preceding_window_is_unbounded) {
return idx + 1; // Technically `idx - 0 + 1`,
// where 0 == Group start,
// and 1 accounts for the current row
}
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Must consider beginning of null-group as window start.
return idx - nulls_begin_idx + 1;
}
// timestamp[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search starts at nulls_end_idx.
// 2. NO NULLS: Binary search starts at 0 (also nulls_end_idx).
// Otherwise, NULLS LAST ordering. Start at 0.
auto group_start = nulls_begin_idx == 0 ? nulls_end_idx : 0;
auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window;
return ((d_timestamps + idx) -
thrust::lower_bound(thrust::seq,
d_timestamps + group_start,
d_timestamps + idx,
highest_timestamp_in_window,
thrust::greater<decltype(highest_timestamp_in_window)>())) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[nulls_begin_idx,
nulls_end_idx,
num_rows = input.size(),
d_timestamps = timestamp_column.data<TimeT>(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
if (following_window_is_unbounded) { return (num_rows - idx) - 1; }
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Window ends at the end of the null group.
return nulls_end_idx - idx - 1;
}
// timestamp[idx] not null. Search must exclude null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Search ends at num_rows.
// 2. NO NULLS: Search also ends at num_rows.
// Otherwise, NULLS LAST ordering: End at nulls_begin_idx.
auto group_end = nulls_begin_idx == 0 ? num_rows : nulls_begin_idx;
auto lowest_timestamp_in_window = d_timestamps[idx] - following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + group_end,
lowest_timestamp_in_window,
thrust::greater<decltype(lowest_timestamp_in_window)>()) -
(d_timestamps + idx)) -
1;
};
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
// Time-range window computation, for timestamps in DESCENDING order.
template <typename TimeT>
std::unique_ptr<column> time_range_window_DESC(
column_view const& input,
column_view const& timestamp_column,
rmm::device_vector<cudf::size_type> const& group_offsets,
rmm::device_vector<cudf::size_type> const& group_labels,
TimeT preceding_window,
bool preceding_window_is_unbounded,
TimeT following_window,
bool following_window_is_unbounded,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
rmm::device_vector<size_type> null_start, null_end;
std::tie(null_start, null_end) =
get_null_bounds_for_timestamp_column(timestamp_column, group_offsets);
auto preceding_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (preceding_window_is_unbounded) { return (idx - group_start) + 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window starts at the start of the null group.
return idx - nulls_begin + 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search must begin at nulls_end.
// 2. NO NULLS: Search must begin at group_start (which also equals nulls_end.)
// Otherwise, NULLS LAST ordering. Search must start at group_start.
auto search_start = nulls_begin == group_start ? nulls_end : group_start;
auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window;
return ((d_timestamps + idx) -
thrust::lower_bound(thrust::seq,
d_timestamps + search_start,
d_timestamps + idx,
highest_timestamp_in_window,
thrust::greater<decltype(highest_timestamp_in_window)>())) +
1; // Add 1, for `preceding` to account for current row.
};
auto following_calculator =
[d_group_offsets = group_offsets.data().get(),
d_group_labels = group_labels.data().get(),
d_timestamps = timestamp_column.data<TimeT>(),
d_nulls_begin = null_start.data().get(),
d_nulls_end = null_end.data().get(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
auto group_end = d_group_offsets[group_label + 1];
auto nulls_begin = d_nulls_begin[group_label];
auto nulls_end = d_nulls_end[group_label];
if (following_window_is_unbounded) { return (group_end - idx) - 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window ends at the end of the null group.
return nulls_end - idx - 1;
}
// timestamp[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search ends at group_end.
// 2. NO NULLS: Search ends at group_end.
// Otherwise, NULLS LAST ordering. Search ends at nulls_begin.
auto search_end = nulls_begin == group_start ? group_end : nulls_begin;
auto lowest_timestamp_in_window = d_timestamps[idx] - following_window;
return (thrust::upper_bound(thrust::seq,
d_timestamps + idx,
d_timestamps + search_end,
lowest_timestamp_in_window,
thrust::greater<decltype(lowest_timestamp_in_window)>()) -
(d_timestamps + idx)) -
1;
};
if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) {
CUDF_FAIL("Time ranged rolling window does NOT (yet) support UDF.");
} else {
return cudf::detail::rolling_window(
input,
empty_like(input)->view(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
preceding_calculator),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
following_calculator),
min_periods,
aggr,
rmm::cuda_stream_default,
mr);
}
}
template <typename TimeT>
std::unique_ptr<column> grouped_time_range_rolling_window_impl(
column_view const& input,
column_view const& timestamp_column,
cudf::order const& timestamp_ordering,
rmm::device_vector<cudf::size_type> const& group_offsets,
rmm::device_vector<cudf::size_type> const& group_labels,
window_bounds preceding_window_in_days, // TODO: Consider taking offset-type as type_id. Assumes
// days for now.
window_bounds following_window_in_days,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
TimeT mult_factor{static_cast<TimeT>(multiplication_factor(timestamp_column.type()))};
if (timestamp_ordering == cudf::order::ASCENDING) {
return group_offsets.empty()
? time_range_window_ASC(input,
timestamp_column,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr)
: time_range_window_ASC(input,
timestamp_column,
group_offsets,
group_labels,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr);
} else {
return group_offsets.empty()
? time_range_window_DESC(input,
timestamp_column,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr)
: time_range_window_DESC(input,
timestamp_column,
group_offsets,
group_labels,
preceding_window_in_days.value * mult_factor,
preceding_window_in_days.is_unbounded,
following_window_in_days.value * mult_factor,
following_window_in_days.is_unbounded,
min_periods,
aggr,
mr);
}
}
} // namespace
std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
size_type preceding_window_in_days,
size_type following_window_in_days,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_time_range_rolling_window(group_keys,
timestamp_column,
timestamp_order,
input,
window_bounds::get(preceding_window_in_days),
window_bounds::get(following_window_in_days),
min_periods,
aggr,
mr);
}
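// Usage sketch (added commentary; the names `keys`, `ts` and `vals` are
// placeholders and the default memory-resource argument is assumed): with a
// pre-sorted group-key table, a TIMESTAMP_DAYS column and a value column, a
// grouped 2-day-preceding / 1-day-following MIN window could be requested
// roughly as:
//
// auto result = cudf::grouped_time_range_rolling_window(
// keys, ts, cudf::order::ASCENDING, vals,
// 2 /* preceding days */, 1 /* following days */,
// 1 /* min_periods */, cudf::make_min_aggregation());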
std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
window_bounds preceding_window_in_days,
window_bounds following_window_in_days,
size_type min_periods,
std::unique_ptr<aggregation> const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) return empty_like(input);
CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
"Size mismatch between group_keys and input vector.");
CUDF_EXPECTS((min_periods > 0), "min_periods must be positive");
using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper;
using index_vector = sort_groupby_helper::index_vector;
index_vector group_offsets, group_labels;
if (group_keys.num_columns() > 0) {
sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES};
group_offsets = helper.group_offsets();
group_labels = helper.group_labels();
}
// Assumes that `timestamp_column` is actually of a timestamp type.
CUDF_EXPECTS(is_supported_range_frame_unit(timestamp_column.type()),
"Unsupported data-type for `timestamp`-based rolling window operation!");
return timestamp_column.type().id() == cudf::type_id::TIMESTAMP_DAYS
? grouped_time_range_rolling_window_impl<int32_t>(input,
timestamp_column,
timestamp_order,
group_offsets,
group_labels,
preceding_window_in_days,
following_window_in_days,
min_periods,
aggr,
mr)
: grouped_time_range_rolling_window_impl<int64_t>(input,
timestamp_column,
timestamp_order,
group_offsets,
group_labels,
preceding_window_in_days,
following_window_in_days,
min_periods,
aggr,
mr);
}
} // namespace cudf
|
4e33e93f3964fd709d39a2a760458985601ed706.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <thrust/set_operations.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/adjacent_difference.h>
#include <thrust/transform.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <sstream>
#include <stdio.h>
#include <fstream>
#include <iomanip>
#include <queue>
#include <set>
#include <stack>
#include <string>
#include <map>
#include <ctime>
#ifdef _WIN64
#include <process.h>
#include <windows.h>
#else
#include <pthread.h>
#endif
#include "cm.h"
#include "atof.h"
#include "itoa.h"
#include "compress.cu"
#include "/home/liuexp/Downloads/cudpp_src_2.0/include/cudpp_hash.h"
#ifdef _WIN64
#define fseeko _fseeki64
#define ftello _ftelli64
#else
#define _FILE_OFFSET_BITS 64
#define fseeko fseek
#define ftello ftell
#endif
using namespace std;
using namespace thrust::placeholders;
unsigned int process_count;
long long int runningRecs = 0;
long long int totalRecs = 0;
bool fact_file_loaded = 0;
bool buffersEmpty = 0;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
void* d_v = NULL;
void* s_v = NULL;
unsigned int curr_segment = 0;
map<string,queue<string> > top_type;
map<string,queue<string> > top_value;
map<string,queue<int_type> > top_nums;
map<string,queue<float_type> > top_nums_f;
template <typename HeadFlagType>
struct head_flag_predicate
: public thrust::binary_function<HeadFlagType,HeadFlagType,bool>
{
__host__ __device__
bool operator()(HeadFlagType left, HeadFlagType right) const
{
return !left;
}
};
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct float_to_decimal
{
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100);
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct float_to_long
{
__host__ __device__
long long int operator()(const float_type x)
{
if ((long long int)((x+EPSILON)*100.0) > (long long int)(x*100.0))
return (long long int)((x+EPSILON)*100.0);
else return (long long int)(x*100.0);
}
};
struct long_to_float
{
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
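// Note on the two functors above (added commentary): decimal values are stored
// as 64-bit integers scaled by 100. float_to_long(12.34) yields 1234 (the
// EPSILON nudge guards against 12.34*100 evaluating to 1233.999...), and
// long_to_float(1234) restores 12.34; only two fractional digits survive the
// round trip.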
struct Uint2Sum
{
__host__ __device__ uint2 operator()(uint2& a, uint2& b)
{
//a.x += b.x;
a.y += b.y;
return a;
}
};
struct uint2_split
{
const uint2* d_res;
unsigned int * output;
uint2_split(const uint2* _d_res, unsigned int * _output):
d_res(_d_res), output(_output) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
output[i] = d_res[i].y;
}
};
struct join_functor
{
const uint2* d_res;
const unsigned int* d_addr;
unsigned int * output;
unsigned int * output1;
join_functor(const uint2* _d_res, const unsigned int * _d_addr, unsigned int * _output, unsigned int * _output1):
d_res(_d_res), d_addr(_d_addr), output(_output), output1(_output1) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
if (d_res[i].x || d_res[i].y) {
for(unsigned int z = 0; z < d_res[i].y; z++) {
output[d_addr[i] + z] = i;
output1[d_addr[i] + z] = d_res[i].x + z;
};
};
}
};
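// Added commentary on join_functor: d_res[i] holds (first matching right row,
// match count) for left row i, and d_addr is presumably the exclusive prefix
// sum of the counts, so every left row scatters its matches into flat arrays.
// For example, d_res = {(5,2), (0,0), (9,1)} with d_addr = {0, 2, 2} produces
// output = {0, 0, 2} (left indices) and output1 = {5, 6, 9} (right indices).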
struct cmp_functor
{
const char * src;
int_type * output;
const char * str;
const unsigned int * len;
cmp_functor(const char * _src, int_type * _output, const char * _str, const unsigned int * _len):
src(_src), output(_output), str(_str), len(_len) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
if(src[i] != 0 && output[i] >= 0 && output[i] < *len ) {
if ( src[i] == str[(*len-output[i]) - 1])
output[i]++;
else
output[i] = -1;
};
}
};
class CudaSet;
void LoadBuffers(void* file_name);
void* LoadBuffers1(void* file_name);
void allocColumns(CudaSet* a, queue<string> fields);
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment);
void copyGatherJoin(CudaSet* a, thrust::device_ptr<unsigned int>& m, string field, unsigned int segment, unsigned int& cnt);
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
CudaSet* bck;
queue<string> bck_fields;
bool all_copied = 0;
unsigned int bck_segment;
unsigned int findSegmentCount(char* file_name);
CudaSet *th;
bool buffersLoaded;
size_t getFreeMem();
bool zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a);
class CudaChar
{
public:
std::vector<thrust::host_vector<char> > h_columns;
std::vector<thrust::device_vector<char> > d_columns;
thrust::host_vector<char> compressed;
unsigned int mColumnCount;
unsigned int mRecCount;
CudaChar(unsigned int columnCount, unsigned int Recs)
: mColumnCount(0),
mRecCount(0)
{
initialize(columnCount, Recs);
}
CudaChar(unsigned int columnCount, unsigned int Recs, bool gpu)
: mColumnCount(0),
mRecCount(0)
{
initialize(columnCount, Recs, gpu);
}
CudaChar(unsigned int columnCount, unsigned int Recs, bool gpu, long long int compressed_size)
: mColumnCount(0),
mRecCount(0)
{
initialize(columnCount, Recs, gpu, compressed_size);
}
void findMinMax(string& minStr, string& maxStr)
{
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(mRecCount);
thrust::sequence(permutation, permutation+mRecCount);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount));
for(int j=mColumnCount-1; j>=0 ; j--)
update_permutation(d_columns[j], raw_ptr, mRecCount, "ASC", (char*)temp);
minStr = "";
maxStr = "";
for(unsigned int j=0; j<mColumnCount; j++) {
minStr+=(d_columns[j])[permutation[0]];
maxStr+=(d_columns[j])[permutation[mRecCount-1]];
};
hipFree(temp);
hipFree(raw_ptr);
}
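// Added commentary on findMinMax: it sorts a permutation of row indices by the
// character sub-columns, applied from the last column to the first, then
// concatenates the characters of the first and last permuted rows, yielding the
// lexicographically smallest and largest strings held by this CudaChar.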
void resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++)
h_columns[i].resize(mRecCount);
}
void allocOnDevice(unsigned int RecordCount)
{
mRecCount = RecordCount;
for(unsigned int i=0; i <mColumnCount; i++)
d_columns[i].resize(mRecCount);
}
void deAllocOnDevice()
{
if (d_columns.size())
for(unsigned int i=0; i <mColumnCount; i++) {
d_columns[i].resize(0);
d_columns[i].shrink_to_fit();
};
};
void CopyToGpu(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++)
thrust::copy(h_columns[i].begin() + offset, h_columns[i].begin() + offset +count, d_columns[i].begin());
};
void CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++)
thrust::copy(d_columns[i].begin(), d_columns[i].begin() + count, h_columns[i].begin() + offset);
};
bool* cmpStr(string str)
{
if (str[str.size()-1] == '%' && str[0] == '%') { // contains
if(str.size() > mColumnCount) {
thrust::device_ptr<bool> res_f = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res_f, res_f+mRecCount, 0, 0);
return thrust::raw_pointer_cast(res_f);
}
else {
return 0;
};
}
else if(str[str.size()-1] == '%') { // startsWith
if(str.size() > mColumnCount) {
thrust::device_ptr<bool> res_f = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res_f, res_f+mRecCount, 0, 0);
return thrust::raw_pointer_cast(res_f);
}
else {
thrust::device_ptr<bool> v = thrust::device_malloc<bool>(mRecCount);
str.erase(str.size()-1,1);
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res, res+mRecCount, 1, 0);
for(int i = 0; i < str.size(); i++) {
thrust::transform(d_columns[i].begin(), d_columns[i].begin()+mRecCount, thrust::constant_iterator<char>(str[i]), v, thrust::equal_to<char>());
thrust::transform(v, v+mRecCount, res, res, thrust::logical_and<bool>());
};
thrust::device_free(v);
return thrust::raw_pointer_cast(res);
};
}
else if(str[0] == '%' ) { // endsWith
str.erase(0,1);
thrust::device_ptr<char> dev_str = thrust::device_malloc<char>(str.size());
thrust::device_ptr<unsigned int> len = thrust::device_malloc<unsigned int>(1);
thrust::device_ptr<int_type> output = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(output, output+mRecCount, 0, 0);
len[0] = str.size();
for(int z=0; z < str.size(); z++)
dev_str[z] = str[z];
for(int i = mColumnCount-1; i >= 0; i--) {
thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0);
cmp_functor ff(thrust::raw_pointer_cast(d_columns[i].data()),
thrust::raw_pointer_cast(output),
thrust::raw_pointer_cast(dev_str),
thrust::raw_pointer_cast(len));
thrust::for_each(begin, begin + mRecCount, ff);
};
thrust::transform(output, output+mRecCount, res, to_zero());
return thrust::raw_pointer_cast(res);
}
else { // equal
thrust::device_ptr<bool> v = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res, res+mRecCount, 1, 0);
if(mColumnCount < str.length())
{
thrust::sequence(res, res+mRecCount, 0, 0);
return thrust::raw_pointer_cast(res);
};
for(unsigned int i = 0; i < mColumnCount; i++) {
if (str.length() >= i+1)
thrust::transform(d_columns[i].begin(), d_columns[i].begin()+mRecCount, thrust::constant_iterator<char>(str[i]), v, thrust::equal_to<char>());
else
thrust::transform(d_columns[i].begin(), d_columns[i].begin()+mRecCount, thrust::constant_iterator<char>(0), v, thrust::equal_to<char>());
thrust::transform(v, v+mRecCount, res, res, thrust::logical_and<int_type>());
};
thrust::device_free(v);
return thrust::raw_pointer_cast(res);
};
};
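// Added commentary on cmpStr: the pattern follows SQL LIKE-style conventions as
// implemented above --
// "abc%" : prefix match, checked column by column against the constant chars;
// "%abc" : suffix match, via cmp_functor walking the columns right to left;
// "%abc%" : contains -- not implemented; returns 0 (or an all-false mask when
// the pattern is longer than the column width);
// "abc" : exact match, padding with 0 bytes past the string length.
// The result is a device array with one boolean per record.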
protected: // methods
void initialize(unsigned int columnCount, unsigned int Recs)
{
mColumnCount = columnCount;
mRecCount = Recs;
for(unsigned int i=0; i <mColumnCount; i++) {
h_columns.push_back(thrust::host_vector<char>(Recs));
d_columns.push_back(thrust::device_vector<char>());
};
};
void initialize(unsigned int columnCount, unsigned int Recs, bool gpu)
{
mColumnCount = columnCount;
mRecCount = Recs;
for(unsigned int i=0; i <mColumnCount; i++) {
h_columns.push_back(thrust::host_vector<char>());
d_columns.push_back(thrust::device_vector<char>());
};
};
void initialize(unsigned int columnCount, unsigned int Recs, bool gpu, long long int compressed_size)
{
mColumnCount = columnCount;
mRecCount = Recs;
for(unsigned int i=0; i <mColumnCount; i++) {
h_columns.push_back(thrust::host_vector<char>());
d_columns.push_back(thrust::device_vector<char>());
};
compressed.resize(compressed_size);
};
};
class CudaSet
{
public:
std::vector<thrust::host_vector<int_type> > h_columns_int;
std::vector<thrust::host_vector<float_type> > h_columns_float;
std::vector<thrust::host_vector<char> > h_columns_char;
std::vector<CudaChar*> h_columns_cuda_char;
std::vector<thrust::device_vector<int_type> > d_columns_int;
std::vector<thrust::device_vector<float_type> > d_columns_float;
thrust::device_vector<unsigned int> prm_d;
map<string, std::vector<unsigned int*> > prm; //represents an op's permutation of original data vectors
//string is a set name
//unsigned int* is an address of the permutation array
map<string, std::vector<unsigned int> > prm_count; // counts of prm permutations
map<unsigned int, unsigned int> type_index;
unsigned int mColumnCount;
unsigned int mRecCount;
map<string,int> columnNames;
map<string, FILE*> filePointers;
bool *grp;
queue<string> columnGroups;
bool fact_table; // 1 = host recs are not compressed, 0 = compressed
FILE *file_p;
unsigned long long int *offsets; // to store the current offsets for compression routines
unsigned int *seq;
bool keep;
unsigned int segCount, maxRecs;
string name;
//CudaSet* filter_ref;
char* load_file_name;
unsigned int oldRecCount;
unsigned int* type; // 0 - integer, 1-float_type, 2-char
bool* decimal; // column is decimal - affects only compression
unsigned int* grp_type; // type of group : SUM, AVG, COUNT etc
unsigned int* cols; // column positions in a file
unsigned int grp_count;
bool partial_load;
bool isJoined;
CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0),
mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
offsets = 0;
partial_load = 0;
isJoined = 0;
}
CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0),
mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
offsets = 0;
partial_load = 1;
isJoined = 0;
}
CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
offsets = 0;
partial_load = 0;
isJoined = 0;
};
CudaSet(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b,Recs, op_sel, op_sel_as);
keep = false;
partial_load = 0;
isJoined = 1;
};
~CudaSet()
{
free();
}
void resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].resize(mRecCount);
else if(type[i] == 1)
h_columns_float[type_index[i]].resize(mRecCount);
else
h_columns_cuda_char[type_index[i]]->resize(addRecs);
};
}
void allocColumnOnDevice(unsigned int colIndex, unsigned int RecordCount)
{
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(RecordCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else
h_columns_cuda_char[type_index[colIndex]]->allocOnDevice(RecordCount);
};
void deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && d_columns_int.size()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && d_columns_float.size()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && h_columns_cuda_char.size())
h_columns_cuda_char[type_index[colIndex]]->deAllocOnDevice();
};
void setTypes(CudaSet* b)
{
for(unsigned int i=0; i < b->mColumnCount; i++)
type[i] = b->type[i];
};
void allocOnDevice(unsigned int RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
hipFree(grp);
grp = NULL;
};
};
void resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
for(unsigned int i = 0; i < h_columns_cuda_char[type_index[colIndex]]->mColumnCount; i++)
(h_columns_cuda_char[type_index[colIndex]]->d_columns[i]).resize(mRecCount+RecCount);
};
};
};
void resizeDevice(unsigned int RecCount)
{
if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool onDevice(unsigned int i)
{
unsigned j = type_index[i];
if (type[i] == 0) {
if (!d_columns_int.size())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (!d_columns_float.size())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(!h_columns_cuda_char.size())
return 0;
if(h_columns_cuda_char[j]->d_columns[0].size() == 0)
return 0;
};
return 1;
}
CudaSet* copyStruct(unsigned int mCount)
{
CudaSet* a = new CudaSet(mCount, mColumnCount);
a->fact_table = fact_table;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if (a->type[i] == 0) {
a->h_columns_int.push_back(thrust::host_vector<int_type>(mCount));
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->type_index[i] = a->h_columns_int.size()-1;
}
else if (a->type[i] == 1) {
a->h_columns_float.push_back(thrust::host_vector<float_type>(mCount));
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->type_index[i] = a->h_columns_float.size()-1;
}
else {
a->h_columns_cuda_char.push_back(new CudaChar((h_columns_cuda_char[type_index[i]])->mColumnCount, mCount));
a->type_index[i] = a->h_columns_cuda_char.size()-1;
};
};
return a;
}
CudaSet* copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->fact_table = fact_table;
a->segCount = segCount;
a->maxRecs = 0;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type>());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type>());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_cuda_char.push_back(new CudaChar((h_columns_cuda_char[type_index[i]])->mColumnCount, mRecCount, 0));
a->type_index[i] = a->h_columns_cuda_char.size()-1;
};
};
if(!a->fact_table) {
a->offsets = new unsigned long long int[mColumnCount];
for(unsigned int i =0; i < mColumnCount; i++)
a->offsets[i] = 0;
};
a->mRecCount = 0;
return a;
}
unsigned long long int readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
FILE* f;
int cnt, grp_count;
unsigned long long int offset = 0;
f = fopen (f1 , "rb" );
// cout << "file " << f1 << " " << segNum << endl;
for(unsigned int i = 0; i < segNum; i++) {
if(type[colIndex] != 2) {
fread((char *)&cnt, 4, 1, f);
offset = offset + cnt + 8;
fseeko(f, offset*8 , SEEK_SET);
}
else {
fread((char *)&cnt, 4, 1, f);
offset = offset + cnt*8 + 12;
fseeko(f, offset , SEEK_SET);
fread((char *)&grp_count, 4, 1, f);
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
offset = offset + 11*4 + grp_count*c->mColumnCount;
fseeko(f, offset , SEEK_SET);
};
};
// find out how much we need to read and rewind back to the start of the segment
if(type[colIndex] != 2) {
fread((char *)&cnt, 4, 1, f);
fseeko(f, -4 , SEEK_CUR);
}
else {
fread((char *)&cnt, 4, 1, f);
offset = cnt*8 + 8;
fseeko(f, offset , SEEK_CUR);
fread((char *)&grp_count, 4, 1, f);
fseeko(f, -(cnt*8+16) , SEEK_CUR);
};
// resize the host arrays if necessary
// and read the segment from a file
if(type[colIndex] == 0) {
if(h_columns_int[type_index[colIndex]].size() < cnt+9) {
//resize(cnt+9-h_columns_int[type_index[colIndex]].size());
h_columns_int[type_index[colIndex]].resize(cnt+9);
};
fread(h_columns_int[type_index[colIndex]].data(),(cnt+8)*8,1,f);
}
else if(type[colIndex] == 1) {
if(h_columns_float[type_index[colIndex]].size() < cnt+9) {
//resize(cnt+9-h_columns_int[type_index[colIndex]].size());
h_columns_float[type_index[colIndex]].resize(cnt+9);
};
fread(h_columns_float[type_index[colIndex]].data(),(cnt+8)*8,1,f);
}
else {
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
if(c->compressed.size() < cnt*8 + 14*4 + grp_count*c->mColumnCount)
c->compressed.resize(cnt*8 + 14*4 + grp_count*c->mColumnCount);
fread(c->compressed.data(), cnt*8 + 14*4 + grp_count*c->mColumnCount,1,f);
};
fclose(f);
return 0;
}
unsigned long long int readSegments(unsigned int segNum, unsigned int colIndex) // read segNum number of segments and return the offset of the next segment
{
unsigned long long int offset = 0; // offset measured in bytes if checking chars and in 8 byte integers if checking ints and decimals
unsigned int grp_count;
unsigned int data_len;
for(unsigned int i = 0; i < segNum; i++) {
if(type[colIndex] == 0) {
data_len = ((unsigned int*)((h_columns_int[type_index[colIndex]]).data() + offset))[0];
offset = offset + data_len + 8;
}
else if(type[colIndex] == 1) {
data_len = ((unsigned int*)((h_columns_float[type_index[colIndex]]).data() + offset))[0];
offset = offset + data_len + 8;
}
else {
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
data_len = ((unsigned int*)(c->compressed.data() + offset))[0];
grp_count = ((unsigned int*)(c->compressed.data() + offset + 8*data_len + 12))[0];
offset = offset + data_len*8 + 14*4 + grp_count*c->mColumnCount;
};
};
return offset;
}
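// Added commentary (layout reconstructed from the reads above): each column is
// stored as a sequence of segments.
// int / decimal segment : a 4-byte count `cnt`, the whole segment spanning
// (cnt + 8) eight-byte words;
// char (dictionary) segment : cnt*8 bytes of codes plus 14*4 bytes of metadata
// (the dictionary size grp_count sits at byte cnt*8 + 12) and
// grp_count * mColumnCount bytes of dictionary strings.
// Binary column files additionally end with a 16-byte tail written by Store():
// total record count (8 bytes), segment count (4) and largest segment size (4).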
void CopyToGpu(unsigned int offset, unsigned int count)
{
if (fact_table) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + count, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + count, d_columns_float[type_index[i]].begin());
break;
default :
(h_columns_cuda_char[type_index[i]])->CopyToGpu(offset, count);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, offset, count);
}
void CopyToGpu(unsigned int segment)
{
if (fact_table) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mRecCount, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mRecCount, d_columns_float[type_index[i]].begin());
break;
default :
(h_columns_cuda_char[type_index[i]])->CopyToGpu(0, mRecCount);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, segment);
}
void CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToGpu(0, mRecCount);
};
}
else {
//cout << "start " << colIndex << " " << type[colIndex] << " " << segment << " " << partial_load << endl;
unsigned long long int data_offset;
if (partial_load)
data_offset = readSegmentsFromFile(segment,colIndex);
else
data_offset = readSegments(segment,colIndex);
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
switch(type[colIndex]) {
case 0 :
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
break;
case 1 :
if(decimal[colIndex]) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
//else // uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
break;
default :
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
unsigned int data_len = ((unsigned int*)(c->compressed.data() + data_offset))[0];
grp_count = ((unsigned int*)(c->compressed.data() + data_offset + data_len*8 + 12))[0];
pfor_dict_decompress(c->compressed.data() + data_offset, c->h_columns , c->d_columns, &mRecCount, NULL,0, c->mColumnCount, 0, d_v, s_v);
};
//hipFree(d_v);
//hipFree(s_v);
};
}
void CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToGpu(0, mRecCount);
};
}
else {
long long int data_offset;
unsigned int totalRecs = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
for(unsigned int i = 0; i < segCount; i++) {
if (partial_load)
data_offset = readSegmentsFromFile(i,colIndex);
else
data_offset = readSegments(i,colIndex);
switch(type[colIndex]) {
case 0 :
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
break;
case 1 :
if(decimal[colIndex]) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
}
// else uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
break;
default :
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
pfor_dict_decompress(c->compressed.data() + data_offset, c->h_columns , c->d_columns, &mRecCount, NULL,0, c->mColumnCount, totalRecs, d_v, s_v);
};
totalRecs = totalRecs + mRecCount;
};
hipFree(d_v);
hipFree(s_v);
mRecCount = totalRecs;
};
}
void CopyColumnToGpu(unsigned int colIndex, unsigned int offset, unsigned int count)
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin() + offset, h_columns_int[type_index[colIndex]].begin() + offset + count, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin() + offset, h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToGpu(offset, count);
};
}
else {
unsigned int start_seg, seg_num, grp_count, data_len, mCount;
start_seg = offset/segCount; // starting segment
seg_num = count/segCount; // number of segments that we need
long long int data_offset;
if(partial_load)
data_offset = readSegmentsFromFile(start_seg,colIndex);
else
data_offset = readSegments(start_seg,colIndex);
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
switch(type[colIndex]) {
case 0 :
for(unsigned int j = 0; j < seg_num; j++) {
data_len = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[data_offset];
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + segCount*j), h_columns_int[type_index[colIndex]].data() + data_offset, &data_len, 0, NULL, d_v, s_v);
data_offset = data_offset + data_len + 8;
};
break;
case 1 :
if(decimal[colIndex]) {
for(unsigned int j = 0; j < seg_num; j++) {
data_len = (((unsigned int*)(h_columns_int[type_index[colIndex]]).data()))[data_offset];
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + segCount*j));
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + segCount*j), h_columns_float[type_index[colIndex]].data() + data_offset, &data_len, 0, NULL, d_v, s_v);
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + segCount*j, long_to_float());
data_offset = data_offset + data_len + 8;
};
}
else // uncompressed float
thrust::copy(h_columns_float[type_index[colIndex]].begin() + offset, h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
for(unsigned int j = 0; j < seg_num; j++) {
data_len = ((unsigned int*)(c->compressed.data() + data_offset))[0];
grp_count = ((unsigned int*)(c->compressed.data() + data_offset + data_len*8 + 12))[0];
pfor_dict_decompress(c->compressed.data() + data_offset, c->h_columns , c->d_columns, &mCount, NULL,0, c->mColumnCount, segCount*j, d_v, s_v);
data_offset = data_offset + data_len*8 + 14*4 + grp_count*c->mColumnCount;
};
};
hipFree(d_v);
hipFree(s_v);
};
}
void CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToHost(offset,RecCount);
}
}
else {
unsigned long long int comp_offset = 0;
switch(type[colIndex]) {
case 0 :
comp_offset = pfor_compress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), RecCount*int_size, NULL, h_columns_int[type_index[colIndex]], 0, comp_offset);
break;
case 1 :
if (decimal[colIndex]) {
thrust::device_ptr<long long int> d_col_dec((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() ));
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin()+RecCount,
d_col_dec, float_to_long());
comp_offset = pfor_compress(thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()), RecCount*float_size, NULL, h_columns_float[type_index[colIndex]], 0, comp_offset);
}
else { // add code for float
} ;
break;
default :
CudaChar *s = (h_columns_cuda_char)[type_index[colIndex]];
comp_offset = pfor_dict_compress(s->d_columns, s->mColumnCount, NULL, RecCount, s->compressed, comp_offset);
};
};
}
void CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToHost(i, offset, count);
}
float_type* get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
void GroupBy(queue<string> columnRef)
{
int grpInd, colIndex;
if(!columnGroups.empty())
hipFree(grp);
CUDA_SAFE_CALL(hipMalloc((void **) &grp, mRecCount * sizeof(bool))); // grp marks group boundaries, used for segmented operations
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.front()); // save for future references
colIndex = columnNames[columnRef.front()];
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, 0, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
}
else { // CudaChar
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
for(unsigned int j=0; j < c->mColumnCount; j++) {
thrust::transform(c->d_columns[j].begin(), c->d_columns[j].begin() + mRecCount - 1, c->d_columns[j].begin()+1, d_group, thrust::not_equal_to<char>());
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<int>());
}
};
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
}
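// Added commentary on GroupBy: after the loop, d_grp holds a 1 wherever a row
// is the last of its group (the final row is always flagged) and grp_count is
// the number of groups. For already-sorted keys {5, 5, 7, 7, 7, 9} the flags
// come out as {0, 1, 0, 0, 1, 1} and grp_count == 3.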
void addDeviceColumn(int_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(mRecCount < recCount)
resizeDeviceColumn(recCount-mRecCount, colIndex);
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
mRecCount = recCount;
};
void addDeviceColumn(float_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(mRecCount < recCount)
resizeDeviceColumn(recCount-mRecCount, colIndex);
};
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
mRecCount = recCount;
};
void addHostColumn(int_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
if (!one_line) {
h_columns_int.push_back(thrust::host_vector<int_type>(old_reccount));
type_index[colIndex] = h_columns_int.size()-1;
}
else {
h_columns_int.push_back(thrust::host_vector<int_type>(1));
type_index[colIndex] = h_columns_int.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_int[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<int_type> src(col);
(h_columns_int[type_index[colIndex]])[0] = (h_columns_int[type_index[colIndex]])[0] + src[0];
};
};
void addHostColumn(float_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
if (!one_line) {
h_columns_float.push_back(thrust::host_vector<float_type>(old_reccount));
type_index[colIndex] = h_columns_float.size()-1;
}
else {
h_columns_float.push_back(thrust::host_vector<float_type>(1));
type_index[colIndex] = h_columns_float.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_float[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<float_type> src(col);
(h_columns_float[type_index[colIndex]])[0] = (h_columns_float[type_index[colIndex]])[0] + src[0];
};
};
void Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
char str[100];
char col_pos[3];
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.close();
};
return;
};
unsigned int mCount;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
if(prm.size() > 0) { // data permuted
// allocate on device and gather
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
allocColumns(this, op_vx);
copyColumns(this, op_vx, 0);
};
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
char buffer [33];
if(onDevice(0)) {
if(h_columns_int.size() == 0 && h_columns_float.size() == 0) {
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 0)
h_columns_int.push_back(thrust::host_vector<int_type>(mCount));
else if(type[i] == 1)
h_columns_float.push_back(thrust::host_vector<float_type>(mCount));
};
resize(mCount+1);
bool ch = 0;
if(!fact_table) {
fact_table = 1;
ch = 1;
};
CopyToHost(0,mCount);
if(ch)
fact_table = 0;
}
else {
if(!fact_table) { // compressed on the host
allocOnDevice(mCount);
for(unsigned int i=0; i < mColumnCount; i++) {
CopyColumnToGpu(i);
resize(mCount+1);
};
fact_table = 1;
CopyToHost(0,mCount);
fact_table = 0;
};
};
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
CudaChar* cc = h_columns_cuda_char[type_index[j]];
char *buf = new char[(cc->mColumnCount)+1];
for(unsigned int z=0; z<(cc->mColumnCount); z++)
buf[z] = (cc->h_columns[z])[i];
buf[cc->mColumnCount] = 0;
fputs(buf, file_pr);
fputs(sep, file_pr);
delete [] buf;
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
fclose(file_pr);
}
else { //writing a binary file
char str[100];
char col_pos[3];
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
bool in_gpu = false;
if(onDevice(0))
in_gpu = true;
void* d;
if(!in_gpu)
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
void* host;
hipHostMalloc(&host, float_size*mCount);
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2 && !in_gpu ) {
allocColumnOnDevice(i, mCount);
CopyColumnToGpu(i, 0, mCount);
};
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
if(type[i] == 0) {
if(!in_gpu) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else
pfor_compress( thrust::raw_pointer_cast(d_columns_int[type_index[i]].data()), mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
if(!in_gpu) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else {
thrust::device_ptr<long long int> d_col_dec((long long int*)(thrust::raw_pointer_cast(d_columns_float[type_index[i]].data()) ));
thrust::transform(d_columns_float[type_index[i]].begin(),d_columns_float[type_index[i]].begin()+mCount, d_col_dec, float_to_long());
pfor_compress( thrust::raw_pointer_cast(d_columns_float[type_index[i]].data()), mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
};
}
else { // do not compress
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
if(in_gpu) {
hipMemcpy(host, thrust::raw_pointer_cast(d_columns_float[type_index[i]].data()), mCount*float_size, hipMemcpyDeviceToHost);
binary_file.write((char *)host,mCount*float_size);
}
else
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
CudaChar *a = h_columns_cuda_char[type_index[i]];
thrust::host_vector<char> hh(mCount*8);
pfor_dict_compress(a->d_columns, a->mColumnCount, str, mCount, hh, 0);
};
if(fact_file_loaded) {
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.close();
};
};
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2 && !in_gpu)
deAllocColumnOnDevice(i);
if(!in_gpu)
hipFree(d);
hipHostFree(host);
}
}
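// Added commentary on Store: in binary mode each column goes to its own file,
// named <file_name>.<column position>, holding one compressed segment per call
// (pfor for ints/decimals, pfor-dict for strings, raw floats otherwise); a
// final call with mRecCount == 0 appends the 16-byte tail (total record count,
// segment count, largest segment size).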
void LoadFile(char* file_name, char* sep )
{
unsigned int count = 0;
char line[500];
int l;
char* field;
unsigned int current_column = 1;
FILE *file_ptr = fopen(file_name, "r");
if (file_ptr == NULL)
cout << "Could not open file " << file_name << endl;
unsigned int *seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
while (fgets(line, 500, file_ptr) != NULL ) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
l = strlen(field);
for(int j =0; j< l; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = field[j];
for(unsigned int j =l; j< (h_columns_cuda_char[type_index[seq[i]]])->mColumnCount; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = 0;
};
};
count++;
if (count == mRecCount) {
mRecCount = mRecCount + process_count;
resize(mRecCount);
};
};
fclose(file_ptr);
mRecCount = count;
}
int LoadBigFile(const char* file_name, const char* sep )
{
unsigned int count = 0;
char line[500];
char* field;
unsigned int current_column = 1;
unsigned int l;
if (file_p == NULL)
file_p = fopen(file_name, "r");
if (file_p == NULL)
cout << "Could not open file " << file_name << endl;
if (seq == 0) {
seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
};
while (count < process_count && fgets(line, 500, file_p) != NULL) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
l = strlen(field);
for(unsigned int j =0; j< l; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = field[j];
for(unsigned int j =l; j< (h_columns_cuda_char[type_index[seq[i]]])->mColumnCount; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = 0;
};
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
}
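// Added commentary on LoadBigFile: it reads at most process_count rows per
// call and returns 1 once the file is exhausted, 0 otherwise. Integer fields
// containing '-' are treated as dates: "2021-03-15" is squashed in place to
// "20210315" and stored as the integer 20210315.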
void free() {
if (seq)
delete [] seq;
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_cuda_char.size() > 0 && prm.size() == 0)
delete h_columns_cuda_char[type_index[i]];
};
delete [] type;
delete [] cols;
if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
hipFree(grp);
for ( map<string, std::vector<unsigned int*> >::iterator it=prm.begin() ; it != prm.end(); ++it ) {
for(unsigned int i = 0; i < prm[(*it).first].size(); i++)
delete [] prm[(*it).first][i];
};
};
bool* logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
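// Added commentary: the scalar and columnar float comparisons in this class are
// tolerance-based -- two values compare equal whenever |x - y| < EPSILON (a
// project-wide constant) -- so rounding noise from the *100 decimal scaling
// does not break equality tests. The numeric op_type codes used by every
// compare() overload are: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.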
bool* compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
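/*
Usage sketch for the compare() overloads above (illustrative only; 'a', 'c0' and 'c1'
are hypothetical names, and columns 0 and 1 are assumed to be int columns already
resident on the GPU). Each overload returns a freshly device_malloc'd bool mask with
one entry per record, which the caller owns:

    // rows where column 0 > column 1
    int_type* c0 = thrust::raw_pointer_cast(a.d_columns_int[a.type_index[0]].data());
    int_type* c1 = thrust::raw_pointer_cast(a.d_columns_int[a.type_index[1]].data());
    bool* mask = a.compare(c0, c1, 2);                       // op_type 2 means '>'
    thrust::device_free(thrust::device_pointer_cast(mask));  // caller releases the mask
*/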
float_type* op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float, writing the result into temp
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
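/*
Usage sketch for the op() arithmetic overloads above (illustrative; 'a' and
'price_ptr' are hypothetical). op_type is one of "MUL", "ADD", "MINUS"; any other
string selects division (the final else branch). The 'reverse' flag swaps the
operand order, which only matters for the non-commutative MINUS/division cases:

    // price * 0.95 over a float column already on the device
    float_type* discounted = a.op(price_ptr, (float_type)0.95, "MUL", 0);

The result is device memory obtained from thrust::device_malloc and must be freed
by the caller.
*/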
protected: // methods
void initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
unsigned int cnt, grp_count;
file_p = NULL;
FILE* f;
char f1[100];
fact_table = 0;
mRecCount = Recs;
load_file_name = file_name;
//std::clock_t start1 = std::clock();
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
strcpy(f1, file_name);
strcat(f1,".");
char col_pos[3];
itoaa(colsRef.front(),col_pos);
strcat(f1,col_pos);
f = fopen (f1 , "rb" );
fread((char *)&cnt, 4, 1, f); // read the size of a segment
// cout << "creating host " << cnt << endl;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type>(cnt + 9));
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
fseeko(f, cnt*8 + 12, SEEK_SET);
fread((char *)&grp_count, 4, 1, f);
h_columns_cuda_char.push_back(new CudaChar(sizeRef.front(), Recs, 0, cnt*8 + 14*4 + grp_count*sizeRef.front()));
type_index[i] = h_columns_cuda_char.size()-1;
};
fclose(f);
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
//std::cout<< "create time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
};
void initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
file_p = NULL;
mRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type>());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_cuda_char.push_back(new CudaChar(sizeRef.front(), Recs, 1));
type_index[i] = h_columns_cuda_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
mColumnCount = ColumnCount;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
seq = 0;
for(unsigned int i =0; i < mColumnCount; i++)
cols[i] = i;
};
void initialize(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = Recs;
mColumnCount = op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
map<string,int>::iterator it;
map<int,string> columnNames1;
seq = 0;
unsigned int i = 0;
while(!op_sel_as.empty()) {
columnNames[op_sel_as.front()] = i;
op_sel_as.pop();
i++;
};
if (Recs != 0) {
unsigned int index;
for(unsigned int i=0; i < mColumnCount; i++) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[index]; // take the decimal flag from a's source column, as the b-branch below does
if ((a->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_cuda_char.push_back(new CudaChar((a->h_columns_cuda_char[a->type_index[index]])->mColumnCount, Recs, 1));
type[i] = 2;
type_index[i] = h_columns_cuda_char.size()-1;
};
}
else {
it = b->columnNames.find(op_sel.front());
index = it->second;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_cuda_char.push_back(new CudaChar((b->h_columns_cuda_char[b->type_index[index]])->mColumnCount, Recs, 1)); // look the char column up through b's own type_index
type[i] = 2;
type_index[i] = h_columns_cuda_char.size()-1;
};
}
op_sel.pop();
};
};
}
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
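/*
Literal mapping performed by reverse_op(), using the comparison codes commented
above: 2 ('>') -> 5 ('<='), 1 ('<') -> 6 ('>='), 6 ('>=') -> 1 ('<'),
5 ('<=') -> 2 ('>'); the equality (4) and inequality codes are returned unchanged.
*/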
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
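/*
Usage sketch (illustrative): getFreeMem() reports the bytes currently free on the
active GPU, so callers can check headroom before a large device allocation.
'needed_bytes' and the fallback below are hypothetical caller-side names:

    if(getFreeMem() < needed_bytes)
        process_in_smaller_segments();   // hypothetical fallback path
*/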
void* LoadBuffers1(void* file_name)
{
void* p = 0;
LoadBuffers(file_name);
return p;
}
void LoadBuffers(void* file_name)
{
char str[100];
char col_pos[3];
unsigned int cnt;
long long int lower_val, upper_val;
map<unsigned int,unsigned int> counts;
bool check_res = 0;
FILE* f;
while(runningRecs < totalRecs && !check_res) {
for(unsigned int i = 0; i< th->mColumnCount; i++) {
strcpy(str, (char*)file_name);
strcat(str,".");
itoaa(th->cols[i],col_pos);
strcat(str,col_pos);
if (th->filePointers.find(str) == th->filePointers.end())
th->filePointers[str] = fopen(str, "rb");
f = th->filePointers[str];
if (th->type[i] == 0 || (th->type[i] == 1 && th->decimal[i])) {
fread(&cnt, 4, 1, f);
counts[i] = cnt;
fread(&lower_val, 8, 1, f);
fread(&upper_val, 8, 1, f);
unsigned int max_resize;
if(cnt == 1)
max_resize = 2;
else
max_resize = cnt;
//cout << "segment upper lower " << upper_val << " " << lower_val << endl;
if (th->type[i] == 0) {
if(cnt > th->h_columns_int[th->type_index[i]].size())
th->h_columns_int[th->type_index[i]].resize(max_resize);
(th->h_columns_int[th->type_index[i]])[0] = lower_val;
(th->h_columns_int[th->type_index[i]])[1] = upper_val;
}
else {
if(cnt > th->h_columns_float[th->type_index[i]].size())
th->h_columns_float[th->type_index[i]].resize(max_resize);
(th->h_columns_float[th->type_index[i]])[0] = ((float_type)lower_val)/100.0;
(th->h_columns_float[th->type_index[i]])[1] = ((float_type)upper_val)/100.0;
};
}
};
if(!top_type[th->name].empty()) {
check_res = zone_map_check(top_type[th->name],top_value[th->name],top_nums[th->name],top_nums_f[th->name],th);
//cout << "check result " << check_res << endl;
if (!check_res) { // do not process segment, move the pointers to the next segment
runningRecs = runningRecs + th->maxRecs;
if (runningRecs >= totalRecs) {
buffersEmpty = 1;
buffersLoaded = 1;
return;
}
else {
// adjust file pointers
for(unsigned int z = 0; z < th->mColumnCount; z++) {
strcpy(str, (char*)file_name);
strcat(str,".");
itoaa(th->cols[z],col_pos);
strcat(str,col_pos);
f = th->filePointers[str];
if (th->type[z] == 0 || (th->type[z] == 1 && th->decimal[z]))
fseeko(f, counts[z]*8 + 44, SEEK_CUR);
else if (th->type[z] == 1 && !th->decimal[z])
fseeko(f, counts[z]*8 + 8, SEEK_CUR);
else {
unsigned int grp_count;
CudaChar *c = th->h_columns_cuda_char[th->type_index[z]];
fread(&cnt, 4, 1, f);
fseeko(f,cnt*8 + 8,SEEK_CUR);
fread(&grp_count, 4, 1, f);
fseeko(f,grp_count*c->mColumnCount,SEEK_CUR);
};
};
};
};
}
else
check_res = 1;
};
for(unsigned int i = 0; i< th->mColumnCount; i++) {
strcpy(str, (char*)file_name);
strcat(str,".");
itoaa(th->cols[i],col_pos);
strcat(str,col_pos);
f = th->filePointers[str];
if (th->type[i] == 0) {
//fread(&cnt, 4, 1, f);
//fread(&lower_val, 8, 1, f);
//fread(&upper_val, 8, 1, f);
fread(th->h_columns_int[th->type_index[i]].data(),counts[i]*8,1,f);
}
else if (th->type[i] == 1 && th->decimal[i]) {
fread(th->h_columns_float[th->type_index[i]].data(),counts[i]*8,1,f);
}
else if (th->type[i] == 1 && !th->decimal[i]) {
unsigned int grp_count;
fread(&cnt, 4, 1, f);
fread(th->h_columns_float[th->type_index[i]].data(),cnt*8,1,f);
fread(&grp_count, 4, 1, f);
}
else {
unsigned int grp_count;
CudaChar *c = th->h_columns_cuda_char[th->type_index[i]];
fread(&cnt, 4, 1, f);
if(!c->compressed.size())
c->compressed.resize(cnt*8);
fread(c->compressed.data(),cnt*8,1,f);
fread(&grp_count, 4, 1, f);
fread(&grp_count, 4, 1, f);
fread(&grp_count, 4, 1, f);
for(unsigned int j = 0; j < c->mColumnCount; j++) {
if(c->h_columns[j].size() < grp_count)
c->h_columns[j].resize(grp_count);
fread(c->h_columns[j].data(),grp_count,1,f);
};
};
};
buffersLoaded = 1;
}
unsigned int findSegmentCount(char* file_name)
{
unsigned int orig_recCount;
unsigned int comp_type, cnt;
FILE* f = fopen ( file_name , "rb" );
if (f==NULL) {
cout << "Cannot open file " << file_name << endl;
exit (1);
}
fread(&cnt, 4, 1, f);
fseeko(f, cnt*8 + 16, SEEK_CUR);
fread(&comp_type, 4, 1, f);
if(comp_type == 2)
fread(&orig_recCount, 4, 1, f);
else if(comp_type == 3)
orig_recCount = cnt;
else {
fread(&orig_recCount, 4, 1, f);
fread(&orig_recCount, 4, 1, f);
};
fclose(f);
return orig_recCount;
};
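/*
Usage sketch (illustrative): findSegmentCount() opens one column file and appears to
return the record count stored in its compression header, e.g.

    unsigned int recs = findSegmentCount("lineitem.0");   // hypothetical file name

The ".0" suffix follows the <table>.<column position> naming used elsewhere in this
file.
*/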
void allocColumns(CudaSet* a, queue<string> fields)
{
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
CudaSet *t = varNames[setMap[fields.front()]];
unsigned int idx = t->columnNames[fields.front()];
bool onDevice = 0;
if(t->type[idx] == 0) {
if(t->d_columns_int[t->type_index[idx]].size() > 0)
onDevice = 1;
}
else if(t->type[idx] == 1) {
if(t->d_columns_float[t->type_index[idx]].size() > 0)
onDevice = 1;
}
else {
if((t->h_columns_cuda_char[t->type_index[idx]])->d_columns[0].size() > 0)
onDevice = 1;
};
if (!onDevice)
t->allocColumnOnDevice(t->columnNames[fields.front()], t->maxRecs);
};
fields.pop();
};
}
unsigned int largest_prm(CudaSet* a, string field)
{
unsigned int maxx = 0;
for(unsigned int i = 0; i < a->prm_count[setMap[field]].size(); i++)
if(maxx < a->prm_count[setMap[field]][i])
maxx = a->prm_count[setMap[field]][i];
return maxx;
};
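/*
largest_prm() scans the per-segment permutation counts recorded for 'field' and
returns the largest one; callers use it to size prm_d once so a single device buffer
can hold any segment's permutation (see gatherColumns below).
*/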
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(segment == 0) {
unsigned int max_count = 0;
if(a->prm.size() > 0) {
for(unsigned int i = 0; i < a->segCount; i++)
if(!a->isJoined) { // alloc just 1 segment
if (a->prm_count[setMap[field]][i] > max_count)
max_count = a->prm_count[setMap[field]][i];
}
else {
// alloc entire result
max_count = max_count + a->prm_count[setMap[field]][i];
};
}
else {
max_count = t->maxRecs;
};
a->allocColumnOnDevice(idx, max_count);
};
if(!a->isJoined) {
if(a->prm.size() > 0) {
unsigned int g_size = a->prm_count[setMap[field]][segment];
//cout << "largest prm " << largest_prm(a, field) << endl;
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
if(curr_segment != segment) {
std::clock_t start2 = std::clock();
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][segment],
4*g_size, hipMemcpyHostToDevice);
curr_segment = segment;
};
if(t->type[tindex] == 0)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin());
else if(t->type[tindex] == 1)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin());
else
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin());
a->mRecCount = g_size;
}
else {
if(t->type[tindex] == 0) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].end(),
a->d_columns_int[a->type_index[idx]].begin());
a->mRecCount = t->d_columns_int[t->type_index[tindex]].end() - t->d_columns_int[t->type_index[tindex]].begin();
}
else if(t->type[tindex] == 1) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].end(),
a->d_columns_float[a->type_index[idx]].begin());
a->mRecCount = t->d_columns_float[t->type_index[tindex]].end() - t->d_columns_float[t->type_index[tindex]].begin();
}
else {
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++) {
thrust::copy((t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].end(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin());
a->mRecCount = (t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].end() - (t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin();
};
};
};
}
else {
// modify prm
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0],
4*g_size, hipMemcpyHostToDevice);
if (segment != 0)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 - t->maxRecs);
if(t->type[tindex] == 0)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin()+g_size, a->prm_d.begin(),
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin(), _1 < t->maxRecs );
else if(t->type[tindex] == 1)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin()+g_size, a->prm_d.begin(),
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin(), _1 < t->maxRecs);
else
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin()+g_size, a->prm_d.begin(),
(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin(), _1 < t->maxRecs);
};
}
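/*
Call pattern sketch (illustrative): gatherColumns() is normally driven one segment at
a time by copyColumns() below — the source set 't' holds the decompressed segment and
the permutation stored in a->prm selects which of its rows land in 'a':

    t->CopyColumnToGpu(t->columnNames[field], seg);   // decompress segment 'seg' of the source
    gatherColumns(a, t, field, seg);                  // permute it into 'a' via a->prm

The two lines above are a simplified, hypothetical driver; the real sequencing lives
in copyColumns()/copyGatherJoin().
*/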
void gatherColumnsJoin(CudaSet* a, CudaSet* t, string field, unsigned int segment, thrust::device_ptr<unsigned int>& m, unsigned int count )
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//cout << "gathering " << field << " " << setMap[field] << " " << tindex << " " << idx << " " << a->segCount << endl;
if(!a->isJoined) {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][segment];
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][segment],
4*g_size, hipMemcpyHostToDevice);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), m + count);
}
else {
// modify prm
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
if(segment == 0)
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0],
4*g_size, hipMemcpyHostToDevice);
if (segment != 0)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 - t->maxRecs);
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
t->d_columns_int[t->type_index[tindex]].begin(), m, _1 < t->maxRecs );
};
//a->mRecCount = a->prm[setMap[field]][segment].end() - a->prm[setMap[field]][segment].begin();
}
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
t = varNames[setMap[fields.front()]];
if (!a->isJoined) {
t->CopyColumnToGpu(t->columnNames[fields.front()], segment); // segment i
if (a != t) {
gatherColumns(a, t, fields.front(), segment);
};
}
else {
//for all segments do copy and gather
unsigned int tindex = t->columnNames[fields.front()];
unsigned int idx = a->columnNames[fields.front()];
string field = fields.front();
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0], 4*g_size, hipMemcpyHostToDevice);
for(unsigned int i = 0; i < t->segCount; i++) {
t->CopyColumnToGpu(t->columnNames[field], i); // segment i
if (i != 0) {
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 - t->maxRecs);
}
else {
a->allocColumnOnDevice(a->columnNames[field], g_size);
};
if(t->type[tindex] == 0)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin(), _1 < t->maxRecs );
else if(t->type[tindex] == 1)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin(), _1 < t->maxRecs);
else
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin(), _1 < t->maxRecs);
};
if (t->segCount != 1)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 + (t->maxRecs*(t->segCount-1)));
};
uniques.insert(fields.front());
};
fields.pop();
};
}
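/*
Usage sketch (illustrative): copyColumns() brings every column named in 'fields' onto
the device for one segment, gathering through a->prm when 'a' is a filtered or joined
view of another set. The field names below are hypothetical:

    queue<string> flds;
    flds.push("l_extendedprice");
    flds.push("l_discount");
    allocColumns(a, flds);
    copyColumns(a, flds, seg);
*/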
void copyGatherJoin(CudaSet* a, thrust::device_ptr<unsigned int>& m, string field, unsigned int segment, unsigned int& cnt )
{
CudaSet *t;
t = varNames[setMap[field]];
unsigned int tindex = t->columnNames[field];
if (!a->isJoined) {
t->CopyColumnToGpu(t->columnNames[field], segment); // segment i
if(a != t) {
//gatherColumns(a, t, field, segment);
gatherColumnsJoin(a, t, field, segment, m, cnt);
cnt = cnt + a->prm_count[setMap[field]][segment];
}
else
cnt = t->mRecCount;
}
else {
//for all segments do copy and gather
for(unsigned int i = 0; i < t->segCount; i++) {
t->CopyColumnToGpu(t->columnNames[field], i); // segment i
gatherColumnsJoin(a, t, field, i, m, cnt);
};
// transform prm back
//a->prm_d = a->prm[setMap[field]][0];
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0], 4*g_size, hipMemcpyHostToDevice);
if (t->segCount != 1)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 + (t->maxRecs*(t->segCount-1)));
cnt = cnt + g_size;
};
}
| 4e33e93f3964fd709d39a2a760458985601ed706.cu | /*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <thrust/set_operations.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/adjacent_difference.h>
#include <thrust/transform.h>
#include <cuda.h>
#include <stdlib.h>
#include <iostream>
#include <sstream>
#include <stdio.h>
#include <fstream>
#include <iomanip>
#include <queue>
#include <set>
#include <stack>
#include <string>
#include <map>
#include <ctime>
#ifdef _WIN64
#include <process.h>
#include <windows.h>
#else
#include <pthread.h>
#endif
#include "cm.h"
#include "atof.h"
#include "itoa.h"
#include "compress.cu"
#include "/home/liuexp/Downloads/cudpp_src_2.0/include/cudpp_hash.h"
#ifdef _WIN64
#define fseeko _fseeki64
#define ftello _ftelli64
#else
#define _FILE_OFFSET_BITS 64
#define fseeko fseek
#define ftello ftell
#endif
using namespace std;
using namespace thrust::placeholders;
unsigned int process_count;
long long int runningRecs = 0;
long long int totalRecs = 0;
bool fact_file_loaded = 0;
bool buffersEmpty = 0;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
void* d_v = NULL;
void* s_v = NULL;
unsigned int curr_segment = 0;
map<string,queue<string> > top_type;
map<string,queue<string> > top_value;
map<string,queue<int_type> > top_nums;
map<string,queue<float_type> > top_nums_f;
template <typename HeadFlagType>
struct head_flag_predicate
: public thrust::binary_function<HeadFlagType,HeadFlagType,bool>
{
__host__ __device__
bool operator()(HeadFlagType left, HeadFlagType right) const
{
return !left;
}
};
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x; // truncated to unsigned int, then implicitly converted back to float_type
}
};
struct float_to_decimal
{
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100); // scale to two implied decimal places before truncating
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct float_to_long
{
__host__ __device__
long long int operator()(const float_type x)
{
if ((long long int)((x+EPSILON)*100.0) > (long long int)(x*100.0))
return (long long int)((x+EPSILON)*100.0);
else return (long long int)(x*100.0);
}
};
struct long_to_float
{
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
struct Uint2Sum
{
__host__ __device__ uint2 operator()(uint2& a, uint2& b)
{
//a.x += b.x;
a.y += b.y;
return a;
}
};
struct uint2_split
{
const uint2* d_res;
unsigned int * output;
uint2_split(const uint2* _d_res, unsigned int * _output):
d_res(_d_res), output(_output) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
output[i] = d_res[i].y;
}
};
struct join_functor
{
const uint2* d_res;
const unsigned int* d_addr;
unsigned int * output;
unsigned int * output1;
join_functor(const uint2* _d_res, const unsigned int * _d_addr, unsigned int * _output, unsigned int * _output1):
d_res(_d_res), d_addr(_d_addr), output(_output), output1(_output1) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
if (d_res[i].x || d_res[i].y) {
for(unsigned int z = 0; z < d_res[i].y; z++) {
output[d_addr[i] + z] = i;
output1[d_addr[i] + z] = d_res[i].x + z;
};
};
}
};
struct cmp_functor
{
const char * src;
int_type * output;
const char * str;
const unsigned int * len;
cmp_functor(const char * _src, int_type * _output, const char * _str, const unsigned int * _len):
src(_src), output(_output), str(_str), len(_len) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
if(src[i] != 0 && output[i] >= 0 && output[i] < *len ) {
if ( src[i] == str[(*len-output[i]) - 1])
output[i]++;
else
output[i] = -1;
};
}
};
class CudaSet;
void LoadBuffers(void* file_name);
void* LoadBuffers1(void* file_name);
void allocColumns(CudaSet* a, queue<string> fields);
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment);
void copyGatherJoin(CudaSet* a, thrust::device_ptr<unsigned int>& m, string field, unsigned int segment, unsigned int& cnt);
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
CudaSet* bck;
queue<string> bck_fields;
bool all_copied = 0;
unsigned int bck_segment;
unsigned int findSegmentCount(char* file_name);
CudaSet *th;
bool buffersLoaded;
size_t getFreeMem();
bool zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a);
class CudaChar
{
public:
std::vector<thrust::host_vector<char> > h_columns;
std::vector<thrust::device_vector<char> > d_columns;
thrust::host_vector<char> compressed;
unsigned int mColumnCount;
unsigned int mRecCount;
CudaChar(unsigned int columnCount, unsigned int Recs)
: mColumnCount(0),
mRecCount(0)
{
initialize(columnCount, Recs);
}
CudaChar(unsigned int columnCount, unsigned int Recs, bool gpu)
: mColumnCount(0),
mRecCount(0)
{
initialize(columnCount, Recs, gpu);
}
CudaChar(unsigned int columnCount, unsigned int Recs, bool gpu, long long int compressed_size)
: mColumnCount(0),
mRecCount(0)
{
initialize(columnCount, Recs, gpu, compressed_size);
}
void findMinMax(string& minStr, string& maxStr)
{
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(mRecCount);
thrust::sequence(permutation, permutation+mRecCount);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount));
for(int j=mColumnCount-1; j>=0 ; j--)
update_permutation(d_columns[j], raw_ptr, mRecCount, "ASC", (char*)temp);
minStr = "";
maxStr = "";
for(unsigned int j=0; j<mColumnCount; j++) {
minStr+=(d_columns[j])[permutation[0]];
maxStr+=(d_columns[j])[permutation[mRecCount-1]];
};
cudaFree(temp);
cudaFree(raw_ptr);
}
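/*
Usage sketch (illustrative): findMinMax() sorts a device-resident CudaChar column
lexicographically by updating the permutation from the last character sub-column to
the first, then returns the first and last strings, e.g. for zone-map style stats:

    string lo, hi;        // hypothetical locals
    c->findMinMax(lo, hi);
*/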
void resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++)
h_columns[i].resize(mRecCount);
}
void allocOnDevice(unsigned int RecordCount)
{
mRecCount = RecordCount;
for(unsigned int i=0; i <mColumnCount; i++)
d_columns[i].resize(mRecCount);
}
void deAllocOnDevice()
{
if (d_columns.size())
for(unsigned int i=0; i <mColumnCount; i++) {
d_columns[i].resize(0);
d_columns[i].shrink_to_fit();
};
};
void CopyToGpu(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++)
thrust::copy(h_columns[i].begin() + offset, h_columns[i].begin() + offset +count, d_columns[i].begin());
};
void CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++)
thrust::copy(d_columns[i].begin(), d_columns[i].begin() + count, h_columns[i].begin() + offset);
};
bool* cmpStr(string str)
{
if (str[str.size()-1] == '%' && str[0] == '%') { // contains
if(str.size() > mColumnCount) {
thrust::device_ptr<bool> res_f = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res_f, res_f+mRecCount, 0, 0);
return thrust::raw_pointer_cast(res_f);
}
else {
return 0; // '%...%' (contains) is not implemented; callers receive a null filter
};
}
else if(str[str.size()-1] == '%') { // startsWith
if(str.size() > mColumnCount) {
thrust::device_ptr<bool> res_f = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res_f, res_f+mRecCount, 0, 0);
return thrust::raw_pointer_cast(res_f);
}
else {
thrust::device_ptr<bool> v = thrust::device_malloc<bool>(mRecCount);
str.erase(str.size()-1,1);
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res, res+mRecCount, 1, 0);
for(unsigned int i = 0; i < str.size(); i++) { // compare every character of the prefix; the trailing '%' was stripped above
thrust::transform(d_columns[i].begin(), d_columns[i].begin()+mRecCount, thrust::constant_iterator<char>(str[i]), v, thrust::equal_to<char>());
thrust::transform(v, v+mRecCount, res, res, thrust::logical_and<bool>());
};
thrust::device_free(v);
return thrust::raw_pointer_cast(res);
};
}
else if(str[0] == '%' ) { // endsWith
str.erase(0,1);
thrust::device_ptr<char> dev_str = thrust::device_malloc<char>(str.size());
thrust::device_ptr<unsigned int> len = thrust::device_malloc<unsigned int>(1);
thrust::device_ptr<int_type> output = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(output, output+mRecCount, 0, 0);
len[0] = str.size();
for(int z=0; z < str.size(); z++)
dev_str[z] = str[z];
for(int i = mColumnCount-1; i >= 0; i--) {
thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0);
cmp_functor ff(thrust::raw_pointer_cast(d_columns[i].data()),
thrust::raw_pointer_cast(output),
thrust::raw_pointer_cast(dev_str),
thrust::raw_pointer_cast(len));
thrust::for_each(begin, begin + mRecCount, ff);
};
thrust::transform(output, output+mRecCount, res, to_zero());
return thrust::raw_pointer_cast(res);
}
else { // equal
thrust::device_ptr<bool> v = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(res, res+mRecCount, 1, 0);
if(mColumnCount < str.length())
{
thrust::sequence(res, res+mRecCount, 0, 0);
return thrust::raw_pointer_cast(res);
};
for(unsigned int i = 0; i < mColumnCount; i++) {
if (str.length() >= i+1)
thrust::transform(d_columns[i].begin(), d_columns[i].begin()+mRecCount, thrust::constant_iterator<char>(str[i]), v, thrust::equal_to<char>());
else
thrust::transform(d_columns[i].begin(), d_columns[i].begin()+mRecCount, thrust::constant_iterator<char>(0), v, thrust::equal_to<char>());
thrust::transform(v, v+mRecCount, res, res, thrust::logical_and<int_type>());
};
thrust::device_free(v);
return thrust::raw_pointer_cast(res);
};
};
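/*
Pattern handling in cmpStr() above, as written (the literals are hypothetical
examples):

    cmpStr("ASIA")      // exact match, padded comparison against every sub-column
    cmpStr("ASIA%")     // prefix ("starts with") match
    cmpStr("%ASIA")     // suffix ("ends with") match via cmp_functor
    cmpStr("%ASIA%")    // contains - not implemented; returns NULL unless the
                        // pattern is longer than the column (then an all-false mask)
*/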
protected: // methods
void initialize(unsigned int columnCount, unsigned int Recs)
{
mColumnCount = columnCount;
mRecCount = Recs;
for(unsigned int i=0; i <mColumnCount; i++) {
h_columns.push_back(thrust::host_vector<char>(Recs));
d_columns.push_back(thrust::device_vector<char>());
};
};
void initialize(unsigned int columnCount, unsigned int Recs, bool gpu)
{
mColumnCount = columnCount;
mRecCount = Recs;
for(unsigned int i=0; i <mColumnCount; i++) {
h_columns.push_back(thrust::host_vector<char>());
d_columns.push_back(thrust::device_vector<char>());
};
};
void initialize(unsigned int columnCount, unsigned int Recs, bool gpu, long long int compressed_size)
{
mColumnCount = columnCount;
mRecCount = Recs;
for(unsigned int i=0; i <mColumnCount; i++) {
h_columns.push_back(thrust::host_vector<char>());
d_columns.push_back(thrust::device_vector<char>());
};
compressed.resize(compressed_size);
};
};
class CudaSet
{
public:
std::vector<thrust::host_vector<int_type> > h_columns_int;
std::vector<thrust::host_vector<float_type> > h_columns_float;
std::vector<thrust::host_vector<char> > h_columns_char;
std::vector<CudaChar*> h_columns_cuda_char;
std::vector<thrust::device_vector<int_type> > d_columns_int;
std::vector<thrust::device_vector<float_type> > d_columns_float;
thrust::device_vector<unsigned int> prm_d;
map<string, std::vector<unsigned int*> > prm; //represents an op's permutation of original data vectors
//string is a set name
//unsigned int* is an adress of the permutation array
map<string, std::vector<unsigned int> > prm_count; // counts of prm permutations
map<unsigned int, unsigned int> type_index;
unsigned int mColumnCount;
unsigned int mRecCount;
map<string,int> columnNames;
map<string, FILE*> filePointers;
bool *grp;
queue<string> columnGroups;
bool fact_table; // 1 = host recs are not compressed, 0 = compressed
FILE *file_p;
unsigned long long int *offsets; // to store the current offsets for compression routines
unsigned int *seq;
bool keep;
unsigned int segCount, maxRecs;
string name;
//CudaSet* filter_ref;
char* load_file_name;
unsigned int oldRecCount;
unsigned int* type; // 0 - integer, 1-float_type, 2-char
bool* decimal; // column is decimal - affects only compression
unsigned int* grp_type; // type of group : SUM, AVG, COUNT etc
unsigned int* cols; // column positions in a file
unsigned int grp_count;
bool partial_load;
bool isJoined;
CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0),
mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
offsets = 0;
partial_load = 0;
isJoined = 0;
}
CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0),
mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
offsets = 0;
partial_load = 1;
isJoined = 0;
}
CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
offsets = 0;
partial_load = 0;
isJoined = 0;
};
CudaSet(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b,Recs, op_sel, op_sel_as);
keep = false;
partial_load = 0;
isJoined = 1;
};
~CudaSet()
{
free();
}
void resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].resize(mRecCount);
else if(type[i] == 1)
h_columns_float[type_index[i]].resize(mRecCount);
else
h_columns_cuda_char[type_index[i]]->resize(addRecs);
};
}
void allocColumnOnDevice(unsigned int colIndex, unsigned int RecordCount)
{
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(RecordCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else
h_columns_cuda_char[type_index[colIndex]]->allocOnDevice(RecordCount);
};
void deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && d_columns_int.size()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && d_columns_float.size()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && h_columns_cuda_char.size())
h_columns_cuda_char[type_index[colIndex]]->deAllocOnDevice();
};
void setTypes(CudaSet* b)
{
for(unsigned int i=0; i < b->mColumnCount; i++)
type[i] = b->type[i];
};
void allocOnDevice(unsigned int RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
cudaFree(grp);
grp = NULL;
};
};
void resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
for(unsigned int i = 0; i < h_columns_cuda_char[type_index[colIndex]]->mColumnCount; i++)
(h_columns_cuda_char[type_index[colIndex]]->d_columns[i]).resize(mRecCount+RecCount);
};
};
};
void resizeDevice(unsigned int RecCount)
{
if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool onDevice(unsigned int i)
{
unsigned j = type_index[i];
if (type[i] == 0) {
if (!d_columns_int.size())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (!d_columns_float.size())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(!h_columns_cuda_char.size())
return 0;
if(h_columns_cuda_char[j]->d_columns[0].size() == 0)
return 0;
};
return 1;
}
CudaSet* copyStruct(unsigned int mCount)
{
CudaSet* a = new CudaSet(mCount, mColumnCount);
a->fact_table = fact_table;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if (a->type[i] == 0) {
a->h_columns_int.push_back(thrust::host_vector<int_type>(mCount));
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->type_index[i] = a->h_columns_int.size()-1;
}
else if (a->type[i] == 1) {
a->h_columns_float.push_back(thrust::host_vector<float_type>(mCount));
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->type_index[i] = a->h_columns_float.size()-1;
}
else {
a->h_columns_cuda_char.push_back(new CudaChar((h_columns_cuda_char[type_index[i]])->mColumnCount, mCount));
a->type_index[i] = a->h_columns_cuda_char.size()-1;
};
};
return a;
}
CudaSet* copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->fact_table = fact_table;
a->segCount = segCount;
a->maxRecs = 0;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type>());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type>());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_cuda_char.push_back(new CudaChar((h_columns_cuda_char[type_index[i]])->mColumnCount, mRecCount, 0));
a->type_index[i] = a->h_columns_cuda_char.size()-1;
};
};
if(!a->fact_table) {
a->offsets = new unsigned long long int[mColumnCount];
for(unsigned int i =0; i < mColumnCount; i++)
a->offsets[i] = 0;
};
a->mRecCount = 0;
return a;
}
unsigned long long int readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
FILE* f;
int cnt, grp_count;
unsigned long long int offset = 0;
f = fopen (f1 , "rb" );
// cout << "file " << f1 << " " << segNum << endl;
for(unsigned int i = 0; i < segNum; i++) {
if(type[colIndex] != 2) {
fread((char *)&cnt, 4, 1, f);
offset = offset + cnt + 8;
fseeko(f, offset*8 , SEEK_SET);
}
else {
fread((char *)&cnt, 4, 1, f);
offset = offset + cnt*8 + 12;
fseeko(f, offset , SEEK_SET);
fread((char *)&grp_count, 4, 1, f);
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
offset = offset + 11*4 + grp_count*c->mColumnCount;
fseeko(f, offset , SEEK_SET);
};
};
// find out how much we need to read and rewind back to the start of the segment
if(type[colIndex] != 2) {
fread((char *)&cnt, 4, 1, f);
fseeko(f, -4 , SEEK_CUR);
}
else {
fread((char *)&cnt, 4, 1, f);
offset = cnt*8 + 8;
fseeko(f, offset , SEEK_CUR);
fread((char *)&grp_count, 4, 1, f);
fseeko(f, -(cnt*8+16) , SEEK_CUR);
};
// resize the host arrays if necessary
// and read the segment from a file
if(type[colIndex] == 0) {
if(h_columns_int[type_index[colIndex]].size() < cnt+9) {
//resize(cnt+9-h_columns_int[type_index[colIndex]].size());
h_columns_int[type_index[colIndex]].resize(cnt+9);
};
fread(h_columns_int[type_index[colIndex]].data(),(cnt+8)*8,1,f);
}
else if(type[colIndex] == 1) {
if(h_columns_float[type_index[colIndex]].size() < cnt+9) {
//resize(cnt+9-h_columns_int[type_index[colIndex]].size());
h_columns_float[type_index[colIndex]].resize(cnt+9);
};
fread(h_columns_float[type_index[colIndex]].data(),(cnt+8)*8,1,f);
}
else {
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
if(c->compressed.size() < cnt*8 + 14*4 + grp_count*c->mColumnCount)
c->compressed.resize(cnt*8 + 14*4 + grp_count*c->mColumnCount);
fread(c->compressed.data(), cnt*8 + 14*4 + grp_count*c->mColumnCount,1,f);
};
fclose(f);
return 0;
}
unsigned long long int readSegments(unsigned int segNum, unsigned int colIndex) // read segNum number of segments and return the offset of the next segment
{
unsigned long long int offset = 0; // offset measured in bytes if checking chars and in 8 byte integers if checking ints and decimals
unsigned int grp_count;
unsigned int data_len;
for(unsigned int i = 0; i < segNum; i++) {
if(type[colIndex] == 0) {
data_len = ((unsigned int*)((h_columns_int[type_index[colIndex]]).data() + offset))[0];
offset = offset + data_len + 8;
}
else if(type[colIndex] == 1) {
data_len = ((unsigned int*)((h_columns_float[type_index[colIndex]]).data() + offset))[0];
offset = offset + data_len + 8;
}
else {
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
data_len = ((unsigned int*)(c->compressed.data() + offset))[0];
grp_count = ((unsigned int*)(c->compressed.data() + offset + 8*data_len + 12))[0];
offset = offset + data_len*8 + 14*4 + grp_count*c->mColumnCount;
};
};
return offset;
}
void CopyToGpu(unsigned int offset, unsigned int count)
{
if (fact_table) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + count, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + count, d_columns_float[type_index[i]].begin());
break;
default :
(h_columns_cuda_char[type_index[i]])->CopyToGpu(offset, count);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, offset, count);
}
void CopyToGpu(unsigned int segment)
{
if (fact_table) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mRecCount, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mRecCount, d_columns_float[type_index[i]].begin());
break;
default :
(h_columns_cuda_char[type_index[i]])->CopyToGpu(0, mRecCount);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, segment);
}
void CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToGpu(0, mRecCount);
};
}
else {
//cout << "start " << colIndex << " " << type[colIndex] << " " << segment << " " << partial_load << endl;
unsigned long long int data_offset;
if (partial_load)
data_offset = readSegmentsFromFile(segment,colIndex);
else
data_offset = readSegments(segment,colIndex);
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
switch(type[colIndex]) {
case 0 :
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
break;
case 1 :
if(decimal[colIndex]) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
//else // uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
break;
default :
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
unsigned int data_len = ((unsigned int*)(c->compressed.data() + data_offset))[0];
grp_count = ((unsigned int*)(c->compressed.data() + data_offset + data_len*8 + 12))[0];
pfor_dict_decompress(c->compressed.data() + data_offset, c->h_columns , c->d_columns, &mRecCount, NULL,0, c->mColumnCount, 0, d_v, s_v);
};
//cudaFree(d_v);
//cudaFree(s_v);
};
}
void CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToGpu(0, mRecCount);
};
}
else {
long long int data_offset;
unsigned int totalRecs = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
for(unsigned int i = 0; i < segCount; i++) {
if (partial_load)
data_offset = readSegmentsFromFile(i,colIndex);
else
data_offset = readSegments(i,colIndex);
switch(type[colIndex]) {
case 0 :
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
break;
case 1 :
if(decimal[colIndex]) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, 0, NULL, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
}
// else uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
break;
default :
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
pfor_dict_decompress(c->compressed.data() + data_offset, c->h_columns , c->d_columns, &mRecCount, NULL,0, c->mColumnCount, totalRecs, d_v, s_v);
};
totalRecs = totalRecs + mRecCount;
};
cudaFree(d_v);
cudaFree(s_v);
mRecCount = totalRecs;
};
}
void CopyColumnToGpu(unsigned int colIndex, unsigned int offset, unsigned int count)
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + offset + count, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToGpu(offset, count);
};
}
else {
unsigned int start_seg, seg_num, grp_count, data_len, mCount;
start_seg = offset/segCount; // starting segment
seg_num = count/segCount; // number of segments that we need
long long int data_offset;
if(partial_load)
data_offset = readSegmentsFromFile(start_seg,colIndex);
else
data_offset = readSegments(start_seg,colIndex);
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
switch(type[colIndex]) {
case 0 :
for(unsigned int j = 0; j < seg_num; j++) {
data_len = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[data_offset];
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + segCount*j), h_columns_int[type_index[colIndex]].data() + data_offset, &data_len, 0, NULL, d_v, s_v);
data_offset = data_offset + data_len + 8;
};
break;
case 1 :
if(decimal[colIndex]) {
for(unsigned int j = 0; j < seg_num; j++) {
data_len = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[data_offset];
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + segCount*j));
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + segCount*j), h_columns_float[type_index[colIndex]].data() + data_offset, &data_len, 0, NULL, d_v, s_v);
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + segCount*j, long_to_float());
data_offset = data_offset + data_len + 8;
};
}
else // uncompressed float
thrust::copy(h_columns_float[type_index[colIndex]].begin() + offset, h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
for(unsigned int j = 0; j < seg_num; j++) {
data_len = ((unsigned int*)(c->compressed.data() + data_offset))[0];
grp_count = ((unsigned int*)(c->compressed.data() + data_offset + data_len*8 + 12))[0];
pfor_dict_decompress(c->compressed.data() + data_offset, c->h_columns , c->d_columns, &mCount, NULL,0, c->mColumnCount, segCount*j, d_v, s_v);
data_offset = data_offset + data_len*8 + 14*4 + grp_count*c->mColumnCount;
};
};
cudaFree(d_v);
cudaFree(s_v);
};
}
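// Copies a column from device back to host. For fact tables this is a plain
// thrust::copy into the host vectors at 'offset'; for compressed sets the device
// data is re-compressed (pfor / pfor-dict) into the host-side buffers instead.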
void CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
if(fact_table) {
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
(h_columns_cuda_char[type_index[colIndex]])->CopyToHost(offset,RecCount);
}
}
else {
unsigned long long int comp_offset = 0;
switch(type[colIndex]) {
case 0 :
comp_offset = pfor_compress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), RecCount*int_size, NULL, h_columns_int[type_index[colIndex]], 0, comp_offset);
break;
case 1 :
if (decimal[colIndex]) {
thrust::device_ptr<long long int> d_col_dec((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() ));
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin()+RecCount,
d_col_dec, float_to_long());
comp_offset = pfor_compress(thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()), RecCount*float_size, NULL, h_columns_float[type_index[colIndex]], 0, comp_offset);
}
else { // add code for float
} ;
break;
default :
CudaChar *s = (h_columns_cuda_char)[type_index[colIndex]];
comp_offset = pfor_dict_compress(s->d_columns, s->mColumnCount, NULL, RecCount, s->compressed, comp_offset);
};
};
}
void CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToHost(i, offset, count);
}
float_type* get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
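// Builds the grouping bitmap used by GROUP BY: grp[i] is set to 1 whenever row i
// differs from row i+1 in any of the grouping columns (the last row is always set),
// so grp_count ends up holding the number of distinct groups.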
void GroupBy(queue<string> columnRef)
{
int grpInd, colIndex;
if(!columnGroups.empty())
cudaFree(grp);
CUDA_SAFE_CALL(cudaMalloc((void **) &grp, mRecCount * sizeof(bool))); // d_di is the vector for segmented scans
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.front()); // save for future references
colIndex = columnNames[columnRef.front()];
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, 0, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
}
else { // CudaChar
CudaChar* c = h_columns_cuda_char[type_index[colIndex]];
for(unsigned int j=0; j < c->mColumnCount; j++) {
thrust::transform(c->d_columns[j].begin(), c->d_columns[j].begin() + mRecCount - 1, c->d_columns[j].begin()+1, d_group, thrust::not_equal_to<char>());
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<int>());
}
};
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
}
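// Registers a device column under 'colName' (resizing it if it already exists and is
// too small) and copies 'recCount' values from the raw device pointer 'col' into it.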
void addDeviceColumn(int_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(mRecCount < recCount)
resizeDeviceColumn(colIndex, recCount-mRecCount);
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
mRecCount = recCount;
};
void addDeviceColumn(float_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(mRecCount < recCount)
resizeDeviceColumn(colIndex, recCount-mRecCount);
};
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
mRecCount = recCount;
};
void addHostColumn(int_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
if (!one_line) {
h_columns_int.push_back(thrust::host_vector<int_type>(old_reccount));
type_index[colIndex] = h_columns_int.size()-1;
}
else {
h_columns_int.push_back(thrust::host_vector<int_type>(1));
type_index[colIndex] = h_columns_int.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_int[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<int_type> src(col);
(h_columns_int[type_index[colIndex]])[0] = (h_columns_int[type_index[colIndex]])[0] + src[0];
};
};
void addHostColumn(float_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
if (!one_line) {
h_columns_float.push_back(thrust::host_vector<float_type>(old_reccount));
type_index[colIndex] = h_columns_float.size()-1;
}
else {
h_columns_float.push_back(thrust::host_vector<float_type>(1));
type_index[colIndex] = h_columns_float.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_float[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<float_type> src(col);
(h_columns_float[type_index[colIndex]])[0] = (h_columns_float[type_index[colIndex]])[0] + src[0];
};
};
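// Writes the set either as delimited text (binary == 0) or as compressed binary
// segments, one file per column named "<file_name>.<column position>". A call with
// mRecCount == 0 and binary == 1 only appends the footer (total_count,
// total_segments, total_max) to each column file.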
void Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
char str[100];
char col_pos[3];
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.close();
};
return;
};
unsigned int mCount;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
if(prm.size() > 0) { // data permuted
// allocate on device and gather
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
allocColumns(this, op_vx);
copyColumns(this, op_vx, 0);
};
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
char buffer [33];
if(onDevice(0)) {
if(h_columns_int.size() == 0 && h_columns_float.size() == 0) {
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 0)
h_columns_int.push_back(thrust::host_vector<int_type>(mCount));
else if(type[i] == 1)
h_columns_float.push_back(thrust::host_vector<float_type>(mCount));
};
resize(mCount+1);
bool ch = 0;
if(!fact_table) {
fact_table = 1;
ch = 1;
};
CopyToHost(0,mCount);
if(ch)
fact_table = 0;
}
else {
if(!fact_table) { // compressed on the host
allocOnDevice(mCount);
for(unsigned int i=0; i < mColumnCount; i++) {
CopyColumnToGpu(i);
resize(mCount+1);
};
fact_table = 1;
CopyToHost(0,mCount);
fact_table = 0;
};
};
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
CudaChar* cc = h_columns_cuda_char[type_index[j]];
char *buf = new char[(cc->mColumnCount)+1];
for(unsigned int z=0; z<(cc->mColumnCount); z++)
buf[z] = (cc->h_columns[z])[i];
buf[cc->mColumnCount] = 0;
fputs(buf, file_pr);
fputs(sep, file_pr);
delete [] buf;
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
fclose(file_pr);
}
else { //writing a binary file
char str[100];
char col_pos[3];
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
bool in_gpu = false;
if(onDevice(0))
in_gpu = true;
void* d;
if(!in_gpu)
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
void* host;
cudaMallocHost(&host, float_size*mCount);
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2 && !in_gpu ) {
allocColumnOnDevice(i, mCount);
CopyColumnToGpu(i, 0, mCount);
};
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
if(type[i] == 0) {
if(!in_gpu) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else
pfor_compress( thrust::raw_pointer_cast(d_columns_int[type_index[i]].data()), mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
if(!in_gpu) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else {
thrust::device_ptr<long long int> d_col_dec((long long int*)(thrust::raw_pointer_cast(d_columns_float[type_index[i]].data()) ));
thrust::transform(d_columns_float[type_index[i]].begin(),d_columns_float[type_index[i]].begin()+mCount, d_col_dec, float_to_long());
pfor_compress( thrust::raw_pointer_cast(d_columns_float[type_index[i]].data()), mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
};
}
else { // do not compress
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
if(in_gpu) {
cudaMemcpy(host, thrust::raw_pointer_cast(d_columns_float[type_index[i]].data()), mCount*float_size, cudaMemcpyDeviceToHost);
binary_file.write((char *)host,mCount*float_size);
}
else
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
CudaChar *a = h_columns_cuda_char[type_index[i]];
thrust::host_vector<char> hh(mCount*8);
pfor_dict_compress(a->d_columns, a->mColumnCount, str, mCount, hh, 0);
};
if(fact_file_loaded) {
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.close();
};
};
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2 && !in_gpu)
deAllocColumnOnDevice(i);
if(!in_gpu)
cudaFree(d);
cudaFreeHost(host);
}
}
void LoadFile(char* file_name, char* sep )
{
unsigned int count = 0;
char line[500];
int l;
char* field;
unsigned int current_column = 1;
FILE *file_ptr = fopen(file_name, "r");
if (file_ptr == NULL)
cout << "Could not open file " << file_name << endl;
unsigned int *seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
while (fgets(line, 500, file_ptr) != NULL ) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
l = strlen(field);
for(int j =0; j< l; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = field[j];
for(unsigned int j =l; j< (h_columns_cuda_char[type_index[seq[i]]])->mColumnCount; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = 0;
};
};
count++;
if (count == mRecCount) {
mRecCount = mRecCount + process_count;
resize(mRecCount);
};
};
fclose(file_ptr);
mRecCount = count;
}
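// Reads up to process_count delimited records per call into the host columns.
// Date-like fields ("YYYY-MM-DD") are packed into integers of the form YYYYMMDD.
// Returns 1 once the end of the file has been reached, 0 otherwise.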
int LoadBigFile(const char* file_name, const char* sep )
{
unsigned int count = 0;
char line[500];
char* field;
unsigned int current_column = 1;
unsigned int l;
if (file_p == NULL)
file_p = fopen(file_name, "r");
if (file_p == NULL)
cout << "Could not open file " << file_name << endl;
if (seq == 0) {
seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
};
while (count < process_count && fgets(line, 500, file_p) != NULL) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoi(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
l = strlen(field);
for(unsigned int j =0; j< l; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = field[j];
for(unsigned int j =l; j< (h_columns_cuda_char[type_index[seq[i]]])->mColumnCount; j++)
((h_columns_cuda_char[type_index[seq[i]]])->h_columns[j])[count] = 0;
};
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
}
void free() {
if (seq)
delete [] seq;
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_cuda_char.size() > 0 && prm.size() == 0)
delete h_columns_cuda_char[type_index[i]];
};
delete [] type;
delete [] cols;
if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
cudaFree(grp);
for ( map<string, std::vector<unsigned int*> >::iterator it=prm.begin() ; it != prm.end(); ++it ) {
for(unsigned int i = 0; i < prm[(*it).first].size(); i++)
delete [] prm[(*it).first][i];
};
};
bool* logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
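// Filter predicate helpers. op_type encodes the comparison operator:
// 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='. Each overload returns a
// newly allocated device array of bool with one flag per record.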
bool* compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
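// Arithmetic helpers for expression evaluation. op_type is "MUL", "ADD" or "MINUS"
// (anything else divides); 'reverse' swaps the operand order. The result is written
// to a newly device_malloc'ed buffer that the caller is responsible for freeing.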
float_type* op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float into temp
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
protected: // methods
void initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
unsigned int cnt, grp_count;
file_p = NULL;
FILE* f;
char f1[100];
fact_table = 0;
mRecCount = Recs;
load_file_name = file_name;
//std::clock_t start1 = std::clock();
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
strcpy(f1, file_name);
strcat(f1,".");
char col_pos[3];
itoaa(colsRef.front(),col_pos);
strcat(f1,col_pos); // read the size of a segment
f = fopen (f1 , "rb" );
fread((char *)&cnt, 4, 1, f);
// cout << "creating host " << cnt << endl;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type>(cnt + 9));
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
fseeko(f, cnt*8 + 12, SEEK_SET);
fread((char *)&grp_count, 4, 1, f);
h_columns_cuda_char.push_back(new CudaChar(sizeRef.front(), Recs, 0, cnt*8 + 14*4 + grp_count*sizeRef.front()));
type_index[i] = h_columns_cuda_char.size()-1;
};
fclose(f);
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
//std::cout<< "create time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
};
void initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
file_p = NULL;
mRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type>());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_cuda_char.push_back(new CudaChar(sizeRef.front(), Recs, 1));
type_index[i] = h_columns_cuda_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
mColumnCount = ColumnCount;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
seq = 0;
for(unsigned int i =0; i < mColumnCount; i++)
cols[i] = i;
};
void initialize(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = Recs;
mColumnCount = op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
map<string,int>::iterator it;
map<int,string> columnNames1;
seq = 0;
unsigned int i = 0;
while(!op_sel_as.empty()) {
columnNames[op_sel_as.front()] = i;
op_sel_as.pop();
i++;
};
if (Recs != 0) {
unsigned int index;
for(unsigned int i=0; i < mColumnCount; i++) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[i];
if ((a->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_cuda_char.push_back(new CudaChar((a->h_columns_cuda_char[a->type_index[index]])->mColumnCount, Recs, 1));
type[i] = 2;
type_index[i] = h_columns_cuda_char.size()-1;
};
}
else {
it = b->columnNames.find(op_sel.front());
index = it->second;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_cuda_char.push_back(new CudaChar((b->h_columns_cuda_char[a->type_index[index]])->mColumnCount, Recs, 1));
type[i] = 2;
type_index[i] = h_columns_cuda_char.size()-1;
};
}
op_sel.pop();
};
};
}
};
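// Maps a comparison operator code to its complement: 2 '>' <-> 5 '<=' and
// 1 '<' <-> 6 '>='; the equality codes are returned unchanged.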
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
}
void* LoadBuffers1(void* file_name)
{
void* p = 0;
LoadBuffers(file_name);
return p;
}
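// Loads the next data segment of every column of 'th' from disk. The per-segment
// header (count, lower bound, upper bound) is read first and checked against the
// zone map of the active filter; segments that cannot contain matching rows are
// skipped by seeking the file pointers past their data.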
void LoadBuffers(void* file_name)
{
char str[100];
char col_pos[3];
unsigned int cnt;
long long int lower_val, upper_val;
map<unsigned int,unsigned int> counts;
bool check_res = 0;
FILE* f;
while(runningRecs < totalRecs && !check_res) {
for(unsigned int i = 0; i< th->mColumnCount; i++) {
strcpy(str, (char*)file_name);
strcat(str,".");
itoaa(th->cols[i],col_pos);
strcat(str,col_pos);
if (th->filePointers.find(str) == th->filePointers.end())
th->filePointers[str] = fopen(str, "rb");
f = th->filePointers[str];
if (th->type[i] == 0 || (th->type[i] == 1 && th->decimal[i])) {
fread(&cnt, 4, 1, f);
counts[i] = cnt;
fread(&lower_val, 8, 1, f);
fread(&upper_val, 8, 1, f);
unsigned int max_resize;
if(cnt == 1)
max_resize = 2;
else
max_resize = cnt;
//cout << "segment upper lower " << upper_val << " " << lower_val << endl;
if (th->type[i] == 0) {
if(cnt > th->h_columns_int[th->type_index[i]].size())
th->h_columns_int[th->type_index[i]].resize(max_resize);
(th->h_columns_int[th->type_index[i]])[0] = lower_val;
(th->h_columns_int[th->type_index[i]])[1] = upper_val;
}
else {
if(cnt > th->h_columns_float[th->type_index[i]].size())
th->h_columns_float[th->type_index[i]].resize(max_resize);
(th->h_columns_float[th->type_index[i]])[0] = ((float_type)lower_val)/100.0;
(th->h_columns_float[th->type_index[i]])[1] = ((float_type)upper_val)/100.0;
};
}
};
if(!top_type[th->name].empty()) {
check_res = zone_map_check(top_type[th->name],top_value[th->name],top_nums[th->name],top_nums_f[th->name],th);
//cout << "check result " << check_res << endl;
if (!check_res) { // do not process segment, move the pointers to the next segment
runningRecs = runningRecs + th->maxRecs;
if (runningRecs >= totalRecs) {
buffersEmpty = 1;
buffersLoaded = 1;
return;
}
else {
// adjust file pointers
for(int z = 0; z< th->mColumnCount; z++) {
strcpy(str, (char*)file_name);
strcat(str,".");
itoaa(th->cols[z],col_pos);
strcat(str,col_pos);
f = th->filePointers[str];
if (th->type[z] == 0 || (th->type[z] == 1 && th->decimal[z]))
fseeko(f, counts[z]*8 + 44, SEEK_CUR);
else if (th->type[z] == 1 && !th->decimal[z])
fseeko(f, counts[z]*8 + 8, SEEK_CUR);
else {
unsigned int grp_count;
CudaChar *c = th->h_columns_cuda_char[th->type_index[z]];
fread(&cnt, 4, 1, f);
fseeko(f,cnt*8 + 8,SEEK_CUR);
fread(&grp_count, 4, 1, f);
fseeko(f,grp_count*c->mColumnCount,SEEK_CUR);
};
};
};
};
}
else
check_res = 1;
};
for(unsigned int i = 0; i< th->mColumnCount; i++) {
strcpy(str, (char*)file_name);
strcat(str,".");
itoaa(th->cols[i],col_pos);
strcat(str,col_pos);
f = th->filePointers[str];
if (th->type[i] == 0) {
//fread(&cnt, 4, 1, f);
//fread(&lower_val, 8, 1, f);
//fread(&upper_val, 8, 1, f);
fread(th->h_columns_int[th->type_index[i]].data(),counts[i]*8,1,f);
}
else if (th->type[i] == 1 && th->decimal[i]) {
fread(th->h_columns_float[th->type_index[i]].data(),counts[i]*8,1,f);
}
else if (th->type[i] == 1 && !th->decimal[i]) {
unsigned int grp_count;
fread(&cnt, 4, 1, f);
fread(th->h_columns_float[th->type_index[i]].data(),cnt*8,1,f);
fread(&grp_count, 4, 1, f);
}
else {
unsigned int grp_count;
CudaChar *c = th->h_columns_cuda_char[th->type_index[i]];
fread(&cnt, 4, 1, f);
if(!c->compressed.size())
c->compressed.resize(cnt*8);
fread(c->compressed.data(),cnt*8,1,f);
fread(&grp_count, 4, 1, f);
fread(&grp_count, 4, 1, f);
fread(&grp_count, 4, 1, f);
for(unsigned int j = 0; j < c->mColumnCount; j++) {
if(c->h_columns[j].size() < grp_count)
c->h_columns[j].resize(grp_count);
fread(c->h_columns[j].data(),grp_count,1,f);
};
};
};
buffersLoaded = 1;
}
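// Reads the header of a compressed column file and returns the record count of its
// first segment; the exact header layout depends on the stored compression type.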
unsigned int findSegmentCount(char* file_name)
{
unsigned int orig_recCount;
unsigned int comp_type, cnt;
FILE* f = fopen ( file_name , "rb" );
if (f==NULL) {
cout << "Cannot open file " << file_name << endl;
exit (1);
}
fread(&cnt, 4, 1, f);
fseeko(f, cnt*8 + 16, SEEK_CUR);
fread(&comp_type, 4, 1, f);
if(comp_type == 2)
fread(&orig_recCount, 4, 1, f);
else if(comp_type == 3)
orig_recCount = cnt;
else {
fread(&orig_recCount, 4, 1, f);
fread(&orig_recCount, 4, 1, f);
};
fclose(f);
return orig_recCount;
};
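// Ensures every column referenced in 'fields' has device storage allocated
// (sized to maxRecs of its owning set) before segments are copied or gathered.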
void allocColumns(CudaSet* a, queue<string> fields)
{
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
CudaSet *t = varNames[setMap[fields.front()]];
unsigned int idx = t->columnNames[fields.front()];
bool onDevice = 0;
if(t->type[idx] == 0) {
if(t->d_columns_int[t->type_index[idx]].size() > 0)
onDevice = 1;
}
else if(t->type[idx] == 1) {
if(t->d_columns_float[t->type_index[idx]].size() > 0)
onDevice = 1;
}
else {
if((t->h_columns_cuda_char[t->type_index[idx]])->d_columns[0].size() > 0)
onDevice = 1;
};
if (!onDevice)
t->allocColumnOnDevice(t->columnNames[fields.front()], t->maxRecs);
};
fields.pop();
};
}
unsigned int largest_prm(CudaSet* a, string field)
{
unsigned int maxx = 0;
for(unsigned int i = 0; i < a->prm_count[setMap[field]].size(); i++)
if(maxx < a->prm_count[setMap[field]][i])
maxx = a->prm_count[setMap[field]][i];
return maxx;
};
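// Materializes one segment of 'field' from source set 't' into destination set 'a',
// applying the permutation vector prm (the row selection produced by a filter or join)
// via thrust::gather / gather_if. For joined sets the permutation spans all segments,
// so the indices are shifted down by maxRecs as each subsequent segment is processed.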
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(segment == 0) {
unsigned int max_count = 0;
if(a->prm.size() > 0) {
for(unsigned int i = 0; i < a->segCount; i++)
if(!a->isJoined) { // alloc just 1 segment
if (a->prm_count[setMap[field]][i] > max_count)
max_count = a->prm_count[setMap[field]][i];
}
else {
// alloc entire result
max_count = max_count + a->prm_count[setMap[field]][i];
};
}
else {
max_count = t->maxRecs;
};
a->allocColumnOnDevice(idx, max_count);
};
if(!a->isJoined) {
if(a->prm.size() > 0) {
unsigned int g_size = a->prm_count[setMap[field]][segment];
//cout << "largest prm " << largest_prm(a, field) << endl;
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
if(curr_segment != segment) {
std::clock_t start2 = std::clock();
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][segment],
4*g_size, cudaMemcpyHostToDevice);
curr_segment = segment;
};
if(t->type[tindex] == 0)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin());
else if(t->type[tindex] == 1)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin());
else
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin());
a->mRecCount = g_size;
}
else {
if(t->type[tindex] == 0) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].end(),
a->d_columns_int[a->type_index[idx]].begin());
a->mRecCount = t->d_columns_int[t->type_index[tindex]].end() - t->d_columns_int[t->type_index[tindex]].begin();
}
else if(t->type[tindex] == 1) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].end(),
a->d_columns_float[a->type_index[idx]].begin());
a->mRecCount = t->d_columns_float[t->type_index[tindex]].end() - t->d_columns_float[t->type_index[tindex]].begin();
}
else {
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++) {
thrust::copy((t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].end(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin());
a->mRecCount = (t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].end() - (t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin();
};
};
};
}
else {
// modify prm
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0],
4*g_size, cudaMemcpyHostToDevice);
if (segment != 0)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 - t->maxRecs);
if(t->type[tindex] == 0)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin()+g_size, a->prm_d.begin(),
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin(), _1 < t->maxRecs );
else if(t->type[tindex] == 1)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin()+g_size, a->prm_d.begin(),
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin(), _1 < t->maxRecs);
else
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin()+g_size, a->prm_d.begin(),
(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin(), _1 < t->maxRecs);
};
}
void gatherColumnsJoin(CudaSet* a, CudaSet* t, string field, unsigned int segment, thrust::device_ptr<unsigned int>& m, unsigned int count )
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//cout << "gathering " << field << " " << setMap[field] << " " << tindex << " " << idx << " " << a->segCount << endl;
if(!a->isJoined) {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][segment];
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][segment],
4*g_size, cudaMemcpyHostToDevice);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), m + count);
}
else {
// modify prm
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
if(segment == 0)
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0],
4*g_size, cudaMemcpyHostToDevice);
if (segment != 0)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 - t->maxRecs);
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
t->d_columns_int[t->type_index[tindex]].begin(), m, _1 < t->maxRecs );
};
//a->mRecCount = a->prm[setMap[field]][segment].end() - a->prm[setMap[field]][segment].begin();
}
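// Decompress-and-gather driver: for every distinct field in 'fields' the owning set's
// segment is copied to the GPU and, when 'a' is a derived (filtered or joined) set,
// the selected rows are gathered into a's device columns.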
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
t = varNames[setMap[fields.front()]];
if (!a->isJoined) {
t->CopyColumnToGpu(t->columnNames[fields.front()], segment); // segment i
if (a != t) {
gatherColumns(a, t, fields.front(), segment);
};
}
else {
//for all segments do copy and gather
unsigned int tindex = t->columnNames[fields.front()];
unsigned int idx = a->columnNames[fields.front()];
string field = fields.front();
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0], 4*g_size, cudaMemcpyHostToDevice);
for(unsigned int i = 0; i < t->segCount; i++) {
t->CopyColumnToGpu(t->columnNames[field], i); // segment i
if (i != 0) {
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 - t->maxRecs);
}
else {
a->allocColumnOnDevice(a->columnNames[field], g_size);
};
if(t->type[tindex] == 0)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin(), _1 < t->maxRecs );
else if(t->type[tindex] == 1)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin(), _1 < t->maxRecs);
else
for(unsigned int j=0; j < (t->h_columns_cuda_char[t->type_index[tindex]])->mColumnCount; j++)
thrust::gather_if(a->prm_d.begin(), a->prm_d.begin() + g_size, a->prm_d.begin(),
(t->h_columns_cuda_char[t->type_index[tindex]])->d_columns[j].begin(),
(a->h_columns_cuda_char[a->type_index[idx]])->d_columns[j].begin(), _1 < t->maxRecs);
};
if (t->segCount != 1)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 + (t->maxRecs*(t->segCount-1)));
};
uniques.insert(fields.front());
};
fields.pop();
};
}
void copyGatherJoin(CudaSet* a, thrust::device_ptr<unsigned int>& m, string field, unsigned int segment, unsigned int& cnt )
{
CudaSet *t;
t = varNames[setMap[field]];
unsigned int tindex = t->columnNames[field];
if (!a->isJoined) {
t->CopyColumnToGpu(t->columnNames[field], segment); // segment i
if(a != t) {
//gatherColumns(a, t, field, segment);
gatherColumnsJoin(a, t, field, segment, m, cnt);
cnt = cnt + a->prm_count[setMap[field]][segment];
}
else
cnt = t->mRecCount;
}
else {
//for all segments do copy and gather
for(unsigned int i = 0; i < t->segCount; i++) {
t->CopyColumnToGpu(t->columnNames[field], i); // segment i
gatherColumnsJoin(a, t, field, i, m, cnt);
};
// transform prm back
//a->prm_d = a->prm[setMap[field]][0];
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a, field));
unsigned int g_size = a->prm_count[setMap[field]][0];
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[setMap[field]][0], 4*g_size, cudaMemcpyHostToDevice);
if (t->segCount != 1)
thrust::transform(a->prm_d.begin(), a->prm_d.begin() + g_size,
a->prm_d.begin(), _1 + (t->maxRecs*(t->segCount-1)));
cnt = cnt + g_size;
};
}
|
f49b198f326cf5ba7818d8af6ba2db1bc5f4a177.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int bx = blockDim.x;
int i = blockIdx.x * bx + threadIdx.x;
int j = blockIdx.y;
float sum = 0.0;
for( int k = 0; k < p; ++k )
sum += b[k*pitch_b+i] * c[k*pitch_c+j];
a[j+pitch_a*i] = sum;
}
| f49b198f326cf5ba7818d8af6ba2db1bc5f4a177.cu | extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int bx = blockDim.x;
int i = blockIdx.x * bx + threadIdx.x;
int j = blockIdx.y;
float sum = 0.0;
for( int k = 0; k < p; ++k )
sum += b[k*pitch_b+i] * c[k*pitch_c+j];
a[j+pitch_a*i] = sum;
}
|
40735239493165966ac140ce0bd53132bc861e42.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__l2dist.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int lda = 1;
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int ldb = 1;
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int ldc = 1;
int d = 1;
int nrows = 1;
int ncols = 1;
float p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((__l2dist), dim3(gridBlock), dim3(threadBlock), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
hipDeviceSynchronize();
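// a further 10 untimed launches to reach steady state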
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((__l2dist), dim3(gridBlock), dim3(threadBlock), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
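// timed region: time 1000 kernel launches with std::chrono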
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((__l2dist), dim3(gridBlock), dim3(threadBlock), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 40735239493165966ac140ce0bd53132bc861e42.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__l2dist.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int lda = 1;
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int ldb = 1;
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int ldc = 1;
int d = 1;
int nrows = 1;
int ncols = 1;
float p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
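// warm-up: cudaFree(0) forces context creation, then one untimed kernel launch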
cudaFree(0);
__l2dist<<<gridBlock,threadBlock>>>(A,lda,B,ldb,C,ldc,d,nrows,ncols,p);
cudaDeviceSynchronize();
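// a further 10 untimed launches to reach steady state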
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__l2dist<<<gridBlock,threadBlock>>>(A,lda,B,ldb,C,ldc,d,nrows,ncols,p);
}
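// timed region: time 1000 kernel launches with std::chrono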
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__l2dist<<<gridBlock,threadBlock>>>(A,lda,B,ldb,C,ldc,d,nrows,ncols,p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
469a99bdfad0394549cfcf14c72d75f05d34cb25.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*
*
* This sample illustrates the usage of CUDA streams for overlapping
* kernel execution with device/host memcopies. The kernel is used to
* initialize an array to a specific value, after which the array is
* copied to the host (CPU) memory. To increase performance, multiple
* kernel/memcopy pairs are launched asynchronously, each pair in its
* own stream. Devices with Compute Capability 1.1 can overlap a kernel
* and a memcopy as long as they are issued in different streams. Kernels
* are serialized. Thus, if n pairs are launched, streamed approach
* can reduce the memcopy cost to the (1/n)th of a single copy of the entire
* data set.
*
* Additionally, this sample uses CUDA events to measure elapsed time for
* CUDA calls. Events are a part of CUDA API and provide a system independent
* way to measure execution times on CUDA devices with approximately 0.5
* microsecond precision.
*
* Elapsed times are averaged over nreps repetitions (10 by default).
*
*/
const char *sSDKsample = "simpleStreams";
const char *sEventSyncMethod[] =
{
"hipEventDefault",
"hipEventBlockingSync",
"hipEventDisableTiming",
NULL
};
const char *sDeviceSyncMethod[] =
{
"hipDeviceScheduleAuto",
"hipDeviceScheduleSpin",
"hipDeviceScheduleYield",
"INVALID",
"hipDeviceScheduleBlockingSync",
NULL
};
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef WIN32
#include <sys/mman.h> // for mmap() / munmap()
#endif
// Macro to aligned up to the memory size in question
#define MEMORY_ALIGNMENT 4096
#define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) )
__global__ void init_array(int *g_data, int *factor, int num_iterations)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i=0; i<num_iterations; i++)
{
g_data[idx] += *factor; // non-coalesced on purpose, to burn time
}
}
bool correct_data(int *a, const int n, const int c)
{
for (int i = 0; i < n; i++)
{
if (a[i] != c)
{
printf("%d: %d %d\n", i, a[i], c);
return false;
}
}
return true;
}
inline void
AllocateHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes)
{
#if CUDART_VERSION >= 4000
#if !defined(__arm__) && !defined(__aarch64__)
if (bPinGenericMemory)
{
// allocate a generic page-aligned chunk of system memory
#ifdef WIN32
printf("> VirtualAlloc() allocating %4.2f Mbytes of (generic page-aligned system memory)\n", (float)nbytes/1048576.0f);
*pp_a = (int *) VirtualAlloc(NULL, (nbytes + MEMORY_ALIGNMENT), MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
#else
printf("> mmap() allocating %4.2f Mbytes (generic page-aligned system memory)\n", (float)nbytes/1048576.0f);
*pp_a = (int *) mmap(NULL, (nbytes + MEMORY_ALIGNMENT), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
*ppAligned_a = (int *)ALIGN_UP(*pp_a, MEMORY_ALIGNMENT);
printf("> hipHostRegister() registering %4.2f Mbytes of generic allocated system memory\n", (float)nbytes/1048576.0f);
// pin allocate memory
checkCudaErrors(hipHostRegister(*ppAligned_a, nbytes, hipHostRegisterMapped));
}
else
#endif
#endif
{
printf("> hipHostMalloc() allocating %4.2f Mbytes of system memory\n", (float)nbytes/1048576.0f);
// allocate host memory (pinned is required to achieve asynchronicity)
checkCudaErrors(hipHostMalloc((void **)pp_a, nbytes));
*ppAligned_a = *pp_a;
}
}
inline void
FreeHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes)
{
#if CUDART_VERSION >= 4000
#if !defined(__arm__) && !defined(__aarch64__)
// CUDA 4.0 support pinning of generic host memory
if (bPinGenericMemory)
{
// unpin and delete host memory
checkCudaErrors(hipHostUnregister(*ppAligned_a));
#ifdef WIN32
VirtualFree(*pp_a, 0, MEM_RELEASE);
#else
munmap(*pp_a, nbytes);
#endif
}
else
#endif
#endif
{
hipHostFree(*pp_a);
}
}
static const char *sSyncMethod[] =
{
"0 (Automatic Blocking)",
"1 (Spin Blocking)",
"2 (Yield Blocking)",
"3 (Undefined Blocking Method)",
"4 (Blocking Sync Event) = low CPU utilization",
NULL
};
void printHelp()
{
printf("Usage: %s [options below]\n", sSDKsample);
printf("\t--sync_method=n for CPU/GPU synchronization\n");
printf("\t n=%s\n", sSyncMethod[0]);
printf("\t n=%s\n", sSyncMethod[1]);
printf("\t n=%s\n", sSyncMethod[2]);
printf("\t <Default> n=%s\n", sSyncMethod[4]);
printf("\t--use_generic_memory (default) use generic page-aligned for system memory\n");
printf("\t--use_cuda_malloc_host (optional) use hipHostMalloc to allocate system memory\n");
}
#if defined(__APPLE__) || defined(MACOSX)
#define DEFAULT_PINNED_GENERIC_MEMORY false
#else
#define DEFAULT_PINNED_GENERIC_MEMORY true
#endif
int main(int argc, char **argv)
{
int cuda_device = 0;
int nstreams = 4; // number of streams for CUDA calls
int nreps = 10; // number of times each experiment is repeated
int n = 16 * 1024 * 1024; // number of ints in the data set
int nbytes = n * sizeof(int); // number of data bytes
dim3 threads, blocks; // kernel launch configuration
float elapsed_time, time_memcpy, time_kernel; // timing variables
float scale_factor = 1.0f;
// allocate generic memory and pin it later instead of using hipHostMalloc()
bool bPinGenericMemory = DEFAULT_PINNED_GENERIC_MEMORY; // we want this to be the default behavior
int device_sync_method = hipDeviceScheduleBlockingSync; // by default we use BlockingSync
int niterations; // number of iterations for the loop inside the kernel
printf("[ %s ]\n\n", sSDKsample);
if (checkCmdLineFlag(argc, (const char **)argv, "help"))
{
printHelp();
return EXIT_SUCCESS;
}
if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0)
{
if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4)
{
printf("Device synchronization method set to = %s\n", sSyncMethod[device_sync_method]);
printf("Setting reps to 100 to demonstrate steady state\n");
nreps = 100;
}
else
{
printf("Invalid command line option sync_method=\"%d\"\n", device_sync_method);
return EXIT_FAILURE;
}
}
else
{
printHelp();
return EXIT_SUCCESS;
}
if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory"))
{
#if defined(__APPLE__) || defined(MACOSX)
bPinGenericMemory = false; // Generic Pinning of System Paged memory not currently supported on Mac OSX
#else
bPinGenericMemory = true;
#endif
}
if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host"))
{
bPinGenericMemory = false;
}
printf("\n> ");
cuda_device = findCudaDevice(argc, (const char **)argv);
// check the compute capability of the device
int num_devices=0;
checkCudaErrors(hipGetDeviceCount(&num_devices));
if (0==num_devices)
{
printf("your system does not have a CUDA capable device, waiving test...\n");
return EXIT_WAIVED;
}
// check if the command-line chosen device ID is within range, exit if not
if (cuda_device >= num_devices)
{
printf("cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices-1);
return EXIT_FAILURE;
}
checkCudaErrors(hipSetDevice(cuda_device));
// Checking for compute capabilities
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
if ((1 == deviceProp.major) && (deviceProp.minor < 1))
{
printf("%s does not have Compute Capability 1.1 or newer. Reducing workload.\n", deviceProp.name);
}
if (deviceProp.major >= 2)
{
niterations = 5;
}
else
{
if (deviceProp.minor > 1)
{
niterations = 5;
}
else
{
niterations = 1; // reduced workload for compute capability 1.0 and 1.1
}
}
// Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false
if (bPinGenericMemory)
{
printf("Device: <%s> canMapHostMemory: %s\n", deviceProp.name, deviceProp.canMapHostMemory ? "Yes" : "No");
if (deviceProp.canMapHostMemory == 0)
{
printf("Using hipHostMalloc, CUDA device does not support mapping of generic host memory\n");
bPinGenericMemory = false;
}
}
// Anything with fewer than 32 cores will have a scaled-down workload
scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
n = (int)rint((float)n / scale_factor);
printf("> CUDA Capable: SM %d.%d hardware\n", deviceProp.major, deviceProp.minor);
printf("> %d Multiprocessor(s) x %d (Cores/Multiprocessor) = %d (Cores)\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf("> scale_factor = %1.4f\n", 1.0f/scale_factor);
printf("> array_size = %d\n\n", n);
// enable use of blocking sync, to reduce CPU usage
printf("> Using CPU/GPU Device Synchronization method (%s)\n", sDeviceSyncMethod[device_sync_method]);
checkCudaErrors(hipSetDeviceFlags(device_sync_method | (bPinGenericMemory ? hipDeviceMapHost : 0)));
// allocate host memory
int c = 5; // value to which the array will be initialized
int *h_a = 0; // pointer to the array data in host memory
int *hAligned_a = 0; // pointer to the array data in host memory (aligned to MEMORY_ALIGNMENT)
// Allocate Host memory (could be using hipHostMalloc or VirtualAlloc/mmap if using the new CUDA 4.0 features)
AllocateHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes);
// allocate device memory
int *d_a = 0, *d_c = 0; // pointers to data and init value in the device memory
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
checkCudaErrors(hipMalloc((void **)&d_c, sizeof(int)));
checkCudaErrors(hipMemcpy(d_c, &c, sizeof(int), hipMemcpyHostToDevice));
printf("\nStarting Test\n");
// allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++)
{
checkCudaErrors(hipStreamCreate(&(streams[i])));
}
// create CUDA event handles
// use blocking sync
hipEvent_t start_event, stop_event;
int eventflags = ((device_sync_method == hipDeviceScheduleBlockingSync) ? hipEventBlockingSync: hipEventDefault);
checkCudaErrors(hipEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(hipEventCreateWithFlags(&stop_event, eventflags));
// time memcopy from device
checkCudaErrors(hipEventRecord(start_event, 0)); // record in stream-0, to ensure that all previous CUDA calls have completed
checkCudaErrors(hipMemcpyAsync(hAligned_a, d_a, nbytes, hipMemcpyDeviceToHost, streams[0]));
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event)); // block until the event is actually recorded
checkCudaErrors(hipEventElapsedTime(&time_memcpy, start_event, stop_event));
printf("memcopy:\t%.2f\n", time_memcpy);
// time kernel
threads=dim3(512, 1);
blocks=dim3(n / threads.x, 1);
checkCudaErrors(hipEventRecord(start_event, 0));
hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[0], d_a, d_c, niterations);
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&time_kernel, start_event, stop_event));
printf("kernel:\t\t%.2f\n", time_kernel);
//////////////////////////////////////////////////////////////////////
// time non-streamed execution for reference
threads=dim3(512, 1);
blocks=dim3(n / threads.x, 1);
checkCudaErrors(hipEventRecord(start_event, 0));
for (int k = 0; k < nreps; k++)
{
hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a, d_c, niterations);
checkCudaErrors(hipMemcpy(hAligned_a, d_a, nbytes, hipMemcpyDeviceToHost));
}
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("non-streamed:\t%.2f\n", elapsed_time / nreps);
//////////////////////////////////////////////////////////////////////
// time execution with nstreams streams
threads=dim3(512,1);
blocks=dim3(n/(nstreams*threads.x),1);
memset(hAligned_a, 255, nbytes); // set host memory bits to all 1s, for testing correctness
checkCudaErrors(hipMemset(d_a, 0, nbytes)); // set device memory to all 0s, for testing correctness
checkCudaErrors(hipEventRecord(start_event, 0));
for (int k = 0; k < nreps; k++)
{
// asynchronously launch nstreams kernels, each operating on its own portion of data
for (int i = 0; i < nstreams; i++)
{
hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[i], d_a + i *n / nstreams, d_c, niterations);
}
// asynchronously launch nstreams memcopies. Note that memcopy in stream x will only
// commence executing when all previous CUDA calls in stream x have completed
for (int i = 0; i < nstreams; i++)
{
checkCudaErrors(hipMemcpyAsync(hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, hipMemcpyDeviceToHost, streams[i]));
}
}
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("%d streams:\t%.2f\n", nstreams, elapsed_time / nreps);
// check whether the output is correct
printf("-------------------------------\n");
bool bResults = correct_data(hAligned_a, n, c*nreps*niterations);
// release resources
for (int i = 0; i < nstreams; i++)
{
checkCudaErrors(hipStreamDestroy(streams[i]));
}
checkCudaErrors(hipEventDestroy(start_event));
checkCudaErrors(hipEventDestroy(stop_event));
// Free hipHostMalloc or Generic Host allocated memory (from CUDA 4.0)
FreeHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes);
checkCudaErrors(hipFree(d_a));
checkCudaErrors(hipFree(d_c));
return bResults ? EXIT_SUCCESS : EXIT_FAILURE;
}
| 469a99bdfad0394549cfcf14c72d75f05d34cb25.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*
*
* This sample illustrates the usage of CUDA streams for overlapping
* kernel execution with device/host memcopies. The kernel is used to
* initialize an array to a specific value, after which the array is
* copied to the host (CPU) memory. To increase performance, multiple
* kernel/memcopy pairs are launched asynchronously, each pair in its
* own stream. Devices with Compute Capability 1.1 can overlap a kernel
* and a memcopy as long as they are issued in different streams. Kernels
* are serialized. Thus, if n pairs are launched, streamed approach
* can reduce the memcopy cost to the (1/n)th of a single copy of the entire
* data set.
*
* Additionally, this sample uses CUDA events to measure elapsed time for
* CUDA calls. Events are a part of CUDA API and provide a system independent
* way to measure execution times on CUDA devices with approximately 0.5
* microsecond precision.
*
* Elapsed times are averaged over nreps repetitions (10 by default).
*
*/
const char *sSDKsample = "simpleStreams";
const char *sEventSyncMethod[] =
{
"cudaEventDefault",
"cudaEventBlockingSync",
"cudaEventDisableTiming",
NULL
};
const char *sDeviceSyncMethod[] =
{
"cudaDeviceScheduleAuto",
"cudaDeviceScheduleSpin",
"cudaDeviceScheduleYield",
"INVALID",
"cudaDeviceScheduleBlockingSync",
NULL
};
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef WIN32
#include <sys/mman.h> // for mmap() / munmap()
#endif
// Macro to aligned up to the memory size in question
#define MEMORY_ALIGNMENT 4096
#define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) )
__global__ void init_array(int *g_data, int *factor, int num_iterations)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i=0; i<num_iterations; i++)
{
g_data[idx] += *factor; // non-coalesced on purpose, to burn time
}
}
bool correct_data(int *a, const int n, const int c)
{
for (int i = 0; i < n; i++)
{
if (a[i] != c)
{
printf("%d: %d %d\n", i, a[i], c);
return false;
}
}
return true;
}
inline void
AllocateHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes)
{
#if CUDART_VERSION >= 4000
#if !defined(__arm__) && !defined(__aarch64__)
if (bPinGenericMemory)
{
// allocate a generic page-aligned chunk of system memory
#ifdef WIN32
printf("> VirtualAlloc() allocating %4.2f Mbytes of (generic page-aligned system memory)\n", (float)nbytes/1048576.0f);
*pp_a = (int *) VirtualAlloc(NULL, (nbytes + MEMORY_ALIGNMENT), MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
#else
printf("> mmap() allocating %4.2f Mbytes (generic page-aligned system memory)\n", (float)nbytes/1048576.0f);
*pp_a = (int *) mmap(NULL, (nbytes + MEMORY_ALIGNMENT), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
*ppAligned_a = (int *)ALIGN_UP(*pp_a, MEMORY_ALIGNMENT);
printf("> cudaHostRegister() registering %4.2f Mbytes of generic allocated system memory\n", (float)nbytes/1048576.0f);
// pin allocate memory
checkCudaErrors(cudaHostRegister(*ppAligned_a, nbytes, cudaHostRegisterMapped));
}
else
#endif
#endif
{
printf("> cudaMallocHost() allocating %4.2f Mbytes of system memory\n", (float)nbytes/1048576.0f);
// allocate host memory (pinned is required to achieve asynchronicity)
checkCudaErrors(cudaMallocHost((void **)pp_a, nbytes));
*ppAligned_a = *pp_a;
}
}
inline void
FreeHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes)
{
#if CUDART_VERSION >= 4000
#if !defined(__arm__) && !defined(__aarch64__)
// CUDA 4.0 support pinning of generic host memory
if (bPinGenericMemory)
{
// unpin and delete host memory
checkCudaErrors(cudaHostUnregister(*ppAligned_a));
#ifdef WIN32
VirtualFree(*pp_a, 0, MEM_RELEASE);
#else
munmap(*pp_a, nbytes);
#endif
}
else
#endif
#endif
{
cudaFreeHost(*pp_a);
}
}
static const char *sSyncMethod[] =
{
"0 (Automatic Blocking)",
"1 (Spin Blocking)",
"2 (Yield Blocking)",
"3 (Undefined Blocking Method)",
"4 (Blocking Sync Event) = low CPU utilization",
NULL
};
void printHelp()
{
printf("Usage: %s [options below]\n", sSDKsample);
printf("\t--sync_method=n for CPU/GPU synchronization\n");
printf("\t n=%s\n", sSyncMethod[0]);
printf("\t n=%s\n", sSyncMethod[1]);
printf("\t n=%s\n", sSyncMethod[2]);
printf("\t <Default> n=%s\n", sSyncMethod[4]);
printf("\t--use_generic_memory (default) use generic page-aligned for system memory\n");
printf("\t--use_cuda_malloc_host (optional) use cudaMallocHost to allocate system memory\n");
}
#if defined(__APPLE__) || defined(MACOSX)
#define DEFAULT_PINNED_GENERIC_MEMORY false
#else
#define DEFAULT_PINNED_GENERIC_MEMORY true
#endif
int main(int argc, char **argv)
{
int cuda_device = 0;
int nstreams = 4; // number of streams for CUDA calls
int nreps = 10; // number of times each experiment is repeated
int n = 16 * 1024 * 1024; // number of ints in the data set
int nbytes = n * sizeof(int); // number of data bytes
dim3 threads, blocks; // kernel launch configuration
float elapsed_time, time_memcpy, time_kernel; // timing variables
float scale_factor = 1.0f;
// allocate generic memory and pin it later instead of using cudaHostAlloc()
bool bPinGenericMemory = DEFAULT_PINNED_GENERIC_MEMORY; // we want this to be the default behavior
int device_sync_method = cudaDeviceBlockingSync; // by default we use BlockingSync
int niterations; // number of iterations for the loop inside the kernel
printf("[ %s ]\n\n", sSDKsample);
if (checkCmdLineFlag(argc, (const char **)argv, "help"))
{
printHelp();
return EXIT_SUCCESS;
}
if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0)
{
if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4)
{
printf("Device synchronization method set to = %s\n", sSyncMethod[device_sync_method]);
printf("Setting reps to 100 to demonstrate steady state\n");
nreps = 100;
}
else
{
printf("Invalid command line option sync_method=\"%d\"\n", device_sync_method);
return EXIT_FAILURE;
}
}
else
{
printHelp();
return EXIT_SUCCESS;
}
if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory"))
{
#if defined(__APPLE__) || defined(MACOSX)
bPinGenericMemory = false; // Generic Pinning of System Paged memory not currently supported on Mac OSX
#else
bPinGenericMemory = true;
#endif
}
if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host"))
{
bPinGenericMemory = false;
}
printf("\n> ");
cuda_device = findCudaDevice(argc, (const char **)argv);
// check the compute capability of the device
int num_devices=0;
checkCudaErrors(cudaGetDeviceCount(&num_devices));
if (0==num_devices)
{
printf("your system does not have a CUDA capable device, waiving test...\n");
return EXIT_WAIVED;
}
// check if the command-line chosen device ID is within range, exit if not
if (cuda_device >= num_devices)
{
printf("cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices-1);
return EXIT_FAILURE;
}
checkCudaErrors(cudaSetDevice(cuda_device));
// Checking for compute capabilities
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
if ((1 == deviceProp.major) && (deviceProp.minor < 1))
{
printf("%s does not have Compute Capability 1.1 or newer. Reducing workload.\n", deviceProp.name);
}
if (deviceProp.major >= 2)
{
niterations = 5;
}
else
{
if (deviceProp.minor > 1)
{
niterations = 5;
}
else
{
niterations = 1; // reduced workload for compute capability 1.0 and 1.1
}
}
// Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false
if (bPinGenericMemory)
{
printf("Device: <%s> canMapHostMemory: %s\n", deviceProp.name, deviceProp.canMapHostMemory ? "Yes" : "No");
if (deviceProp.canMapHostMemory == 0)
{
printf("Using cudaMallocHost, CUDA device does not support mapping of generic host memory\n");
bPinGenericMemory = false;
}
}
// Anything with fewer than 32 cores will have a scaled-down workload
scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
n = (int)rint((float)n / scale_factor);
printf("> CUDA Capable: SM %d.%d hardware\n", deviceProp.major, deviceProp.minor);
printf("> %d Multiprocessor(s) x %d (Cores/Multiprocessor) = %d (Cores)\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf("> scale_factor = %1.4f\n", 1.0f/scale_factor);
printf("> array_size = %d\n\n", n);
// enable use of blocking sync, to reduce CPU usage
printf("> Using CPU/GPU Device Synchronization method (%s)\n", sDeviceSyncMethod[device_sync_method]);
checkCudaErrors(cudaSetDeviceFlags(device_sync_method | (bPinGenericMemory ? cudaDeviceMapHost : 0)));
// allocate host memory
int c = 5; // value to which the array will be initialized
int *h_a = 0; // pointer to the array data in host memory
int *hAligned_a = 0; // pointer to the array data in host memory (aligned to MEMORY_ALIGNMENT)
// Allocate Host memory (could be using cudaMallocHost or VirtualAlloc/mmap if using the new CUDA 4.0 features)
AllocateHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes);
// allocate device memory
int *d_a = 0, *d_c = 0; // pointers to data and init value in the device memory
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
checkCudaErrors(cudaMalloc((void **)&d_c, sizeof(int)));
checkCudaErrors(cudaMemcpy(d_c, &c, sizeof(int), cudaMemcpyHostToDevice));
printf("\nStarting Test\n");
// allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++)
{
checkCudaErrors(cudaStreamCreate(&(streams[i])));
}
// create CUDA event handles
// use blocking sync
cudaEvent_t start_event, stop_event;
int eventflags = ((device_sync_method == cudaDeviceBlockingSync) ? cudaEventBlockingSync: cudaEventDefault);
checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags));
// time memcopy from device
checkCudaErrors(cudaEventRecord(start_event, 0)); // record in stream-0, to ensure that all previous CUDA calls have completed
checkCudaErrors(cudaMemcpyAsync(hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost, streams[0]));
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event)); // block until the event is actually recorded
checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event));
printf("memcopy:\t%.2f\n", time_memcpy);
// time kernel
threads=dim3(512, 1);
blocks=dim3(n / threads.x, 1);
checkCudaErrors(cudaEventRecord(start_event, 0));
init_array<<<blocks, threads, 0, streams[0]>>>(d_a, d_c, niterations);
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&time_kernel, start_event, stop_event));
printf("kernel:\t\t%.2f\n", time_kernel);
//////////////////////////////////////////////////////////////////////
// time non-streamed execution for reference
threads=dim3(512, 1);
blocks=dim3(n / threads.x, 1);
checkCudaErrors(cudaEventRecord(start_event, 0));
for (int k = 0; k < nreps; k++)
{
init_array<<<blocks, threads>>>(d_a, d_c, niterations);
checkCudaErrors(cudaMemcpy(hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost));
}
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("non-streamed:\t%.2f\n", elapsed_time / nreps);
//////////////////////////////////////////////////////////////////////
// time execution with nstreams streams
threads=dim3(512,1);
blocks=dim3(n/(nstreams*threads.x),1);
memset(hAligned_a, 255, nbytes); // set host memory bits to all 1s, for testing correctness
checkCudaErrors(cudaMemset(d_a, 0, nbytes)); // set device memory to all 0s, for testing correctness
checkCudaErrors(cudaEventRecord(start_event, 0));
for (int k = 0; k < nreps; k++)
{
// asynchronously launch nstreams kernels, each operating on its own portion of data
for (int i = 0; i < nstreams; i++)
{
init_array<<<blocks, threads, 0, streams[i]>>>(d_a + i *n / nstreams, d_c, niterations);
}
// asynchronously launch nstreams memcopies. Note that memcopy in stream x will only
// commence executing when all previous CUDA calls in stream x have completed
for (int i = 0; i < nstreams; i++)
{
checkCudaErrors(cudaMemcpyAsync(hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, cudaMemcpyDeviceToHost, streams[i]));
}
}
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("%d streams:\t%.2f\n", nstreams, elapsed_time / nreps);
// check whether the output is correct
printf("-------------------------------\n");
bool bResults = correct_data(hAligned_a, n, c*nreps*niterations);
// release resources
for (int i = 0; i < nstreams; i++)
{
checkCudaErrors(cudaStreamDestroy(streams[i]));
}
checkCudaErrors(cudaEventDestroy(start_event));
checkCudaErrors(cudaEventDestroy(stop_event));
// Free cudaMallocHost or Generic Host allocated memory (from CUDA 4.0)
FreeHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes);
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaFree(d_c));
return bResults ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
05ec4dcc38bf33673f9030668476f91214660a77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test evaluation for caching allocator of device memory
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>]"
"[--bytes=<timing bytes>]"
"[--i=<timing iterations>]"
"\n", argv[0]);
exit(0);
}
#if (CUB_PTX_ARCH == 0)
// Initialize device
CubDebugExit(args.DeviceInit());
// Get number of GPUs and current GPU
int num_gpus;
int initial_gpu;
int timing_iterations = 10000;
int timing_bytes = 1024 * 1024;
if (CubDebug(hipGetDeviceCount(&num_gpus))) exit(1);
if (CubDebug(hipGetDevice(&initial_gpu))) exit(1);
args.GetCmdLineArgument("i", timing_iterations);
args.GetCmdLineArgument("bytes", timing_bytes);
// Create default allocator (caches up to 6MB in device allocations per GPU)
CachingDeviceAllocator allocator;
allocator.debug = true;
printf("Running single-gpu tests...\n"); fflush(stdout);
//
// Test0
//
// Create a new stream
hipStream_t other_stream;
CubDebugExit(hipStreamCreate(&other_stream));
// Allocate 999 bytes on the current gpu in stream0
char *d_999B_stream0_a;
char *d_999B_stream0_b;
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
// Run some big kernel in stream 0
hipLaunchKernelGGL(( EmptyKernel<void>), dim3(32000), dim3(512), 1024 * 8, 0, );
// Free d_999B_stream0_a
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
// Allocate another 999 bytes in stream 0
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Run some big kernel in stream 0
hipLaunchKernelGGL(( EmptyKernel<void>), dim3(32000), dim3(512), 1024 * 8, 0, );
// Free d_999B_stream0_b
CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
// Allocate 999 bytes on the current gpu in other_stream
char *d_999B_stream_other_a;
char *d_999B_stream_other_b;
allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream);
// Check that that we have 1 live blocks on the initial GPU (that we allocated a new one because d_999B_stream0_b is only available for stream 0 until it becomes idle)
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have one cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
// Run some big kernel in other_stream
hipLaunchKernelGGL(( EmptyKernel<void>), dim3(32000), dim3(512), 1024 * 8, other_stream, );
// Free d_999B_stream_other
CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a));
// Check that we can now use both allocations in stream 0 after synchronizing the device
CubDebugExit(hipDeviceSynchronize());
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Free d_999B_stream0_a and d_999B_stream0_b
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
// Check that we can now use both allocations in other_stream
CubDebugExit(hipDeviceSynchronize());
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_b, 999, other_stream));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Run some big kernel in other_stream
hipLaunchKernelGGL(( EmptyKernel<void>), dim3(32000), dim3(512), 1024 * 8, other_stream, );
// Free d_999B_stream_other_a and d_999B_stream_other_b
CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a));
CubDebugExit(allocator.DeviceFree(d_999B_stream_other_b));
// Check that we can now use both allocations in stream 0 after synchronizing the device and destroying the other stream
CubDebugExit(hipDeviceSynchronize());
CubDebugExit(hipStreamDestroy(other_stream));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Free d_999B_stream0_a and d_999B_stream0_b
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
// Free all cached
CubDebugExit(allocator.FreeAllCached());
//
// Test1
//
// Allocate 5 bytes on the current gpu
char *d_5B;
CubDebugExit(allocator.DeviceAllocate((void **) &d_5B, 5));
// Check that that we have zero free bytes cached on the initial GPU
AssertEquals(allocator.cached_bytes[initial_gpu].free, 0);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
//
// Test2
//
// Allocate 4096 bytes on the current gpu
char *d_4096B;
CubDebugExit(allocator.DeviceAllocate((void **) &d_4096B, 4096));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
//
// Test3
//
// DeviceFree d_5B
CubDebugExit(allocator.DeviceFree(d_5B));
// Check that that we have min_bin_bytes free bytes cached on the initial gpu
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have 1 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
//
// Test4
//
// DeviceFree d_4096B
CubDebugExit(allocator.DeviceFree(d_4096B));
// Check that that we have the 4096 + min_bin free bytes cached on the initial gpu
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes + 4096);
// Check that that we have 0 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 0);
// Check that that we have 2 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 2);
//
// Test5
//
// Allocate 768 bytes on the current gpu
char *d_768B;
CubDebugExit(allocator.DeviceAllocate((void **) &d_768B, 768));
// Check that that we have the min_bin free bytes cached on the initial gpu (4096 was reused)
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have 1 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
//
// Test6
//
// Allocate max_cached_bytes on the current gpu
char *d_max_cached;
CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached, allocator.max_cached_bytes));
// DeviceFree d_max_cached
CubDebugExit(allocator.DeviceFree(d_max_cached));
// Check that that we have the min_bin free bytes cached on the initial gpu (max cached was not returned because we went over)
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we still have 1 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
//
// Test7
//
// Free all cached blocks on all GPUs
CubDebugExit(allocator.FreeAllCached());
// Check that that we have 0 bytes cached on the initial GPU
AssertEquals(allocator.cached_bytes[initial_gpu].free, 0);
// Check that that we have 0 cached blocks across all GPUs
AssertEquals(allocator.cached_blocks.size(), 0);
// Check that that still we have 1 live block across all GPUs
AssertEquals(allocator.live_blocks.size(), 1);
//
// Test8
//
// Allocate max cached bytes + 1 on the current gpu
char *d_max_cached_plus;
CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached_plus, allocator.max_cached_bytes + 1));
// DeviceFree max cached bytes
CubDebugExit(allocator.DeviceFree(d_max_cached_plus));
// DeviceFree d_768B
CubDebugExit(allocator.DeviceFree(d_768B));
unsigned int power;
size_t rounded_bytes;
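// round 768 up to its allocation bin: rounded_bytes = bin_growth^power, the smallest such power covering 768 bytes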
allocator.NearestPowerOf(power, rounded_bytes, allocator.bin_growth, 768);
// Check that that we have 4096 free bytes cached on the initial gpu
AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes);
// Check that that we have 1 cached blocks across all GPUs
AssertEquals(allocator.cached_blocks.size(), 1);
// Check that that still we have 0 live block across all GPUs
AssertEquals(allocator.live_blocks.size(), 0);
#ifndef CUB_CDP
// BUG: find out why these tests fail when one GPU is CDP compliant and the other is not
if (num_gpus > 1)
{
printf("\nRunning multi-gpu tests...\n"); fflush(stdout);
//
// Test9
//
// Allocate 768 bytes on the next gpu
int next_gpu = (initial_gpu + 1) % num_gpus;
char *d_768B_2;
CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768));
// DeviceFree d_768B on the next gpu
CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2));
// Re-allocate 768 bytes on the next gpu
CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768));
// Re-free d_768B on the next gpu
CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2));
// Check that that we have 4096 free bytes cached on the initial gpu
AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes);
// Check that that we have 4096 free bytes cached on the second gpu
AssertEquals(allocator.cached_bytes[next_gpu].free, rounded_bytes);
// Check that that we have 2 cached blocks across all GPUs
AssertEquals(allocator.cached_blocks.size(), 2);
// Check that that still we have 0 live block across all GPUs
AssertEquals(allocator.live_blocks.size(), 0);
}
#endif // CUB_CDP
//
// Performance
//
printf("\nCPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
fflush(stdout); fflush(stderr);
// CPU performance comparisons vs cached. Allocate and free a 1MB block 2000 times
CpuTimer cpu_timer;
char *d_1024MB = NULL;
allocator.debug = false;
// Prime the caching allocator and the kernel
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
CubDebugExit(allocator.DeviceFree(d_1024MB));
hipLaunchKernelGGL(( cub::EmptyKernel<void>), dim3(1), dim3(32), 0, 0, );
// CUDA
cpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(hipMalloc((void **) &d_1024MB, timing_bytes));
CubDebugExit(hipFree(d_1024MB));
}
cpu_timer.Stop();
float cuda_malloc_elapsed_millis = cpu_timer.ElapsedMillis();
// CUB
cpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
CubDebugExit(allocator.DeviceFree(d_1024MB));
}
cpu_timer.Stop();
float cub_calloc_elapsed_millis = cpu_timer.ElapsedMillis();
printf("\t CUB CachingDeviceAllocator allocation CPU speedup: %.2f (avg hipMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n",
cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis,
cuda_malloc_elapsed_millis / timing_iterations,
cub_calloc_elapsed_millis / timing_iterations);
// GPU performance comparisons. Allocate and free a 1MB block 2000 times
GpuTimer gpu_timer;
printf("\nGPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
fflush(stdout); fflush(stderr);
// Kernel-only
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
hipLaunchKernelGGL(( cub::EmptyKernel<void>), dim3(1), dim3(32), 0, 0, );
}
gpu_timer.Stop();
float cuda_empty_elapsed_millis = gpu_timer.ElapsedMillis();
// CUDA
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(hipMalloc((void **) &d_1024MB, timing_bytes));
hipLaunchKernelGGL(( cub::EmptyKernel<void>), dim3(1), dim3(32), 0, 0, );
CubDebugExit(hipFree(d_1024MB));
}
gpu_timer.Stop();
cuda_malloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis;
// CUB
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
hipLaunchKernelGGL(( cub::EmptyKernel<void>), dim3(1), dim3(32), 0, 0, );
CubDebugExit(allocator.DeviceFree(d_1024MB));
}
gpu_timer.Stop();
cub_calloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis;
printf("\t CUB CachingDeviceAllocator allocation GPU speedup: %.2f (avg hipMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n",
cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis,
cuda_malloc_elapsed_millis / timing_iterations,
cub_calloc_elapsed_millis / timing_iterations);
#endif
printf("Success\n");
return 0;
}
| 05ec4dcc38bf33673f9030668476f91214660a77.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test evaluation for caching allocator of device memory
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>]"
"[--bytes=<timing bytes>]"
"[--i=<timing iterations>]"
"\n", argv[0]);
exit(0);
}
#if (CUB_PTX_ARCH == 0)
// Initialize device
CubDebugExit(args.DeviceInit());
// Get number of GPUs and current GPU
int num_gpus;
int initial_gpu;
int timing_iterations = 10000;
int timing_bytes = 1024 * 1024;
if (CubDebug(cudaGetDeviceCount(&num_gpus))) exit(1);
if (CubDebug(cudaGetDevice(&initial_gpu))) exit(1);
args.GetCmdLineArgument("i", timing_iterations);
args.GetCmdLineArgument("bytes", timing_bytes);
// Create default allocator (caches up to 6MB in device allocations per GPU)
CachingDeviceAllocator allocator;
allocator.debug = true;
printf("Running single-gpu tests...\n"); fflush(stdout);
//
// Test0
//
// Create a new stream
cudaStream_t other_stream;
CubDebugExit(cudaStreamCreate(&other_stream));
// Allocate 999 bytes on the current gpu in stream0
char *d_999B_stream0_a;
char *d_999B_stream0_b;
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
// Run some big kernel in stream 0
EmptyKernel<void><<<32000, 512, 1024 * 8, 0>>>();
// Free d_999B_stream0_a
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
// Allocate another 999 bytes in stream 0
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Run some big kernel in stream 0
EmptyKernel<void><<<32000, 512, 1024 * 8, 0>>>();
// Free d_999B_stream0_b
CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
// Allocate 999 bytes on the current gpu in other_stream
char *d_999B_stream_other_a;
char *d_999B_stream_other_b;
allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream);
// Check that that we have 1 live blocks on the initial GPU (that we allocated a new one because d_999B_stream0_b is only available for stream 0 until it becomes idle)
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have one cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
// Run some big kernel in other_stream
EmptyKernel<void><<<32000, 512, 1024 * 8, other_stream>>>();
// Free d_999B_stream_other
CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a));
// Check that we can now use both allocations in stream 0 after synchronizing the device
CubDebugExit(cudaDeviceSynchronize());
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Free d_999B_stream0_a and d_999B_stream0_b
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
// Check that we can now use both allocations in other_stream
CubDebugExit(cudaDeviceSynchronize());
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_b, 999, other_stream));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Run some big kernel in other_stream
EmptyKernel<void><<<32000, 512, 1024 * 8, other_stream>>>();
// Free d_999B_stream_other_a and d_999B_stream_other_b
CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a));
CubDebugExit(allocator.DeviceFree(d_999B_stream_other_b));
// Check that we can now use both allocations in stream 0 after synchronizing the device and destroying the other stream
CubDebugExit(cudaDeviceSynchronize());
CubDebugExit(cudaStreamDestroy(other_stream));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
// Check that that we have no cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 0);
// Free d_999B_stream0_a and d_999B_stream0_b
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
// Free all cached
CubDebugExit(allocator.FreeAllCached());
//
// Test1
//
// Allocate 5 bytes on the current gpu
char *d_5B;
CubDebugExit(allocator.DeviceAllocate((void **) &d_5B, 5));
// Check that that we have zero free bytes cached on the initial GPU
AssertEquals(allocator.cached_bytes[initial_gpu].free, 0);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
//
// Test2
//
// Allocate 4096 bytes on the current gpu
char *d_4096B;
CubDebugExit(allocator.DeviceAllocate((void **) &d_4096B, 4096));
// Check that that we have 2 live blocks on the initial GPU
AssertEquals(allocator.live_blocks.size(), 2);
//
// Test3
//
// DeviceFree d_5B
CubDebugExit(allocator.DeviceFree(d_5B));
// Check that that we have min_bin_bytes free bytes cached on the initial gpu
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have 1 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
//
// Test4
//
// DeviceFree d_4096B
CubDebugExit(allocator.DeviceFree(d_4096B));
// Check that that we have the 4096 + min_bin free bytes cached on the initial gpu
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes + 4096);
// Check that that we have 0 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 0);
// Check that that we have 2 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 2);
//
// Test5
//
// Allocate 768 bytes on the current gpu
char *d_768B;
CubDebugExit(allocator.DeviceAllocate((void **) &d_768B, 768));
// Check that that we have the min_bin free bytes cached on the initial gpu (4096 was reused)
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we have 1 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
//
// Test6
//
// Allocate max_cached_bytes on the current gpu
char *d_max_cached;
CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached, allocator.max_cached_bytes));
// DeviceFree d_max_cached
CubDebugExit(allocator.DeviceFree(d_max_cached));
// Check that that we have the min_bin free bytes cached on the initial gpu (max cached was not returned because we went over)
AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
// Check that that we have 1 live block on the initial GPU
AssertEquals(allocator.live_blocks.size(), 1);
// Check that that we still have 1 cached block on the initial GPU
AssertEquals(allocator.cached_blocks.size(), 1);
//
// Test7
//
// Free all cached blocks on all GPUs
CubDebugExit(allocator.FreeAllCached());
// Check that that we have 0 bytes cached on the initial GPU
AssertEquals(allocator.cached_bytes[initial_gpu].free, 0);
// Check that that we have 0 cached blocks across all GPUs
AssertEquals(allocator.cached_blocks.size(), 0);
// Check that that still we have 1 live block across all GPUs
AssertEquals(allocator.live_blocks.size(), 1);
//
// Test8
//
// Allocate max cached bytes + 1 on the current gpu
char *d_max_cached_plus;
CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached_plus, allocator.max_cached_bytes + 1));
// DeviceFree max cached bytes
CubDebugExit(allocator.DeviceFree(d_max_cached_plus));
// DeviceFree d_768B
CubDebugExit(allocator.DeviceFree(d_768B));
unsigned int power;
size_t rounded_bytes;
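// round 768 up to its allocation bin: rounded_bytes = bin_growth^power, the smallest such power covering 768 bytes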
allocator.NearestPowerOf(power, rounded_bytes, allocator.bin_growth, 768);
    // Check that we have 4096 free bytes cached on the initial gpu
    AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes);
    // Check that we have 1 cached block across all GPUs
    AssertEquals(allocator.cached_blocks.size(), 1);
    // Check that we still have 0 live blocks across all GPUs
    AssertEquals(allocator.live_blocks.size(), 0);
#ifndef CUB_CDP
// BUG: find out why these tests fail when one GPU is CDP compliant and the other is not
if (num_gpus > 1)
{
printf("\nRunning multi-gpu tests...\n"); fflush(stdout);
//
// Test9
//
// Allocate 768 bytes on the next gpu
int next_gpu = (initial_gpu + 1) % num_gpus;
char *d_768B_2;
CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768));
// DeviceFree d_768B on the next gpu
CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2));
// Re-allocate 768 bytes on the next gpu
CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768));
// Re-free d_768B on the next gpu
CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2));
        // Check that we have 4096 free bytes cached on the initial gpu
        AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes);
        // Check that we have 4096 free bytes cached on the second gpu
        AssertEquals(allocator.cached_bytes[next_gpu].free, rounded_bytes);
        // Check that we have 2 cached blocks across all GPUs
        AssertEquals(allocator.cached_blocks.size(), 2);
        // Check that we still have 0 live blocks across all GPUs
        AssertEquals(allocator.live_blocks.size(), 0);
}
#endif // CUB_CDP
//
// Performance
//
printf("\nCPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
fflush(stdout); fflush(stderr);
// CPU performance comparisons vs cached. Allocate and free a 1MB block 2000 times
CpuTimer cpu_timer;
char *d_1024MB = NULL;
allocator.debug = false;
// Prime the caching allocator and the kernel
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
CubDebugExit(allocator.DeviceFree(d_1024MB));
cub::EmptyKernel<void><<<1, 32>>>();
// CUDA
cpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(cudaMalloc((void **) &d_1024MB, timing_bytes));
CubDebugExit(cudaFree(d_1024MB));
}
cpu_timer.Stop();
float cuda_malloc_elapsed_millis = cpu_timer.ElapsedMillis();
// CUB
cpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
CubDebugExit(allocator.DeviceFree(d_1024MB));
}
cpu_timer.Stop();
float cub_calloc_elapsed_millis = cpu_timer.ElapsedMillis();
printf("\t CUB CachingDeviceAllocator allocation CPU speedup: %.2f (avg cudaMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n",
cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis,
cuda_malloc_elapsed_millis / timing_iterations,
cub_calloc_elapsed_millis / timing_iterations);
// GPU performance comparisons. Allocate and free a 1MB block 2000 times
GpuTimer gpu_timer;
printf("\nGPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
fflush(stdout); fflush(stderr);
// Kernel-only
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
cub::EmptyKernel<void><<<1, 32>>>();
}
gpu_timer.Stop();
float cuda_empty_elapsed_millis = gpu_timer.ElapsedMillis();
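    // This kernel-only time is subtracted from the timed loops below so that only the
    // allocation/free overhead of cudaMalloc vs. DeviceAllocate is compared.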
// CUDA
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(cudaMalloc((void **) &d_1024MB, timing_bytes));
cub::EmptyKernel<void><<<1, 32>>>();
CubDebugExit(cudaFree(d_1024MB));
}
gpu_timer.Stop();
cuda_malloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis;
// CUB
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
cub::EmptyKernel<void><<<1, 32>>>();
CubDebugExit(allocator.DeviceFree(d_1024MB));
}
gpu_timer.Stop();
cub_calloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis;
printf("\t CUB CachingDeviceAllocator allocation GPU speedup: %.2f (avg cudaMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n",
cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis,
cuda_malloc_elapsed_millis / timing_iterations,
cub_calloc_elapsed_millis / timing_iterations);
#endif
printf("Success\n");
return 0;
}
|
5c34ab4eda47e68a16cc6178c4baa2272be43850.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "linear_upsampling.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
template<typename Dtype, typename Acctype>
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 3> data1, THCDeviceTensor<Dtype, 3> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int width1 = data1.getSize(2);
const int width2 = data2.getSize(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][w1];
data2[n][c][w2] = val;
}
}
return;
}
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Acctype val = w0lambda * data1[n][c][w1]
+ w1lambda * data1[n][c][w1+w1p];
data2[n][c][w2] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 3> data1, const THCDeviceTensor<Dtype, 3> data2){
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int width1 = data1.getSize(2);
const int width2 = data2.getSize(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][w1];
data1[n][c][w2] += val;
}
}
return;
}
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][w2];
atomicAdd(data1[n][c][w1].data(),
ScalarConvert<Acctype, Dtype>::to(w0lambda * d2val));
atomicAdd(data1[n][c][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(w1lambda * d2val));
}
}
}
}
#include "generic/TemporalUpSamplingLinear.cu"
#include "THHGenerateFloatTypes.h"
| 5c34ab4eda47e68a16cc6178c4baa2272be43850.cu | // Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "linear_upsampling.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
template<typename Dtype, typename Acctype>
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 3> data1, THCDeviceTensor<Dtype, 3> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int width1 = data1.getSize(2);
const int width2 = data2.getSize(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][w1];
data2[n][c][w2] = val;
}
}
return;
}
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Acctype val = w0lambda * data1[n][c][w1]
+ w1lambda * data1[n][c][w1+w1p];
data2[n][c][w2] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 3> data1, const THCDeviceTensor<Dtype, 3> data2){
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int width1 = data1.getSize(2);
const int width2 = data2.getSize(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][w1];
data1[n][c][w2] += val;
}
}
return;
}
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][w2];
atomicAdd(data1[n][c][w1].data(),
ScalarConvert<Acctype, Dtype>::to(w0lambda * d2val));
atomicAdd(data1[n][c][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(w1lambda * d2val));
}
}
}
}
#include "generic/TemporalUpSamplingLinear.cu"
#include "THCGenerateFloatTypes.h"
|
be39119b7da9eea3ef4947680f5e5a472874cde8.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <hip/hip_fp16.h>
#include <stdio.h>
__device__ void print_val(int blockid, int threadid, float value){
if (blockid == 0 && threadid == 0) printf("tid: %d, value is: %.8f\n", threadid, float(value));
}
template <int Len>
struct Array {
__device__ __forceinline__ Array(){}
__device__ __forceinline__ Array(float* inputs){
#pragma unroll
for (int i = 0; i < Len; i++){
data[i] = inputs[i];
}
}
float data[Len];
};
template <int Len>
struct ArrayMaxFunc{
__device__ __forceinline__ Array<Len> operator()(
const Array<Len>& p1, const Array<Len>& p2)
{
Array<Len> result;
#pragma unroll
for (int i = 0; i < Len; i ++){
result.data[i] = p1.data[i] > p2.data[i] ? p1.data[i] : p2.data[i];
}
return result;
}
};
template <int Len>
struct ArraySumFunc{
__device__ __forceinline__ Array<Len> operator()(
const Array<Len>& p1, const Array<Len>& p2)
{
Array<Len> result;
#pragma unroll
for (int i = 0; i < Len; i ++){
result.data[i] = p1.data[i] + p2.data[i];
}
return result;
}
};
template <int VecLength, typename LoadType>
__device__ __forceinline__ void vmax(float* a, LoadType b, const float scaler)
{
half* b_h = reinterpret_cast<half*>(&b);
#pragma unroll
for (int i = 0; i < VecLength; i++){
a[i] = a[i] > __half2float(b_h[i]) * scaler ? a[i] : __half2float(b_h[i]) * scaler;
}
}
template <int VecLength, typename LoadType>
__device__ __forceinline__ void vexpsum(float* a, LoadType b, Array<VecLength> max, const float scaler)
{
half* b_h = reinterpret_cast<half*>(&b);
#pragma unroll
for (int i = 0; i < VecLength; i++){
a[i] += __expf(__half2float(b_h[i]) * scaler - max.data[i]);
}
}
template <int VecLength, typename LoadType>
__device__ __forceinline__ void vsoftmax(LoadType a, Array<VecLength> max, Array<VecLength> sum, LoadType* out, const float scaler)
{
half* a_h = reinterpret_cast<half*>(&a);
half out_reg[VecLength];
#pragma unroll
for (int i = 0; i < VecLength; i++){
out_reg[i] = __float2half(__expf(__half2float(a_h[i]) * scaler - max.data[i]) / sum.data[i]);
}
LoadType* out_reg_v = reinterpret_cast<LoadType*>(out_reg);
*(out) = *(out_reg_v);
}
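// One thread block processes one CSR row (selected via row_indices/row_offsets):
// pass 1 finds the per-lane row maximum, pass 2 accumulates the sum of exponentials,
// and pass 3 writes the normalized softmax values back with vectorized stores.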
template <int VecLength, typename LoadType, int BlockDim>
__device__ void csrSoftmaxKernel_(
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const half* __restrict__ values,
half* __restrict__ attn,
const float scaler)
{
int m_index = blockIdx.x;
m_index = __ldg(row_indices + m_index);
int row_offset = __ldg(row_offsets + m_index);
int nonzeros = __ldg(row_offsets + m_index + 1) - row_offset;
// Specialized BlockReduce for a 1D block on type half
typedef hipcub::BlockReduce<Array<VecLength>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ Array<VecLength> row_max;
__shared__ Array<VecLength> row_sum;
// Private register file that holds the loaded data
float in_attn[VecLength] = {-1e+10};
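    // Note: this aggregate initialization only sets the first lane to -1e+10; the
    // remaining VecLength-1 lanes start at 0.0f before the running-max pass below.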
// Pointer to the input attention weight
const LoadType* values_v = reinterpret_cast<const LoadType *>(values) + row_offset;
LoadType* attn_v = reinterpret_cast<LoadType *>(attn) + row_offset;
// First Run: get the maximum number of the current row
for (int i = threadIdx.x; i < nonzeros; i += BlockDim){
// Load data to register file
vmax<VecLength, LoadType>(in_attn, __ldg(values_v + i), scaler);
}
Array<VecLength> local_max(in_attn);
Array<VecLength> max_val = BlockReduce(temp_storage).Reduce(local_max, ArrayMaxFunc<VecLength>());
if (threadIdx.x == 0) row_max = max_val;
__syncthreads();
// print_val(blockIdx.x, threadIdx.x, row_max.data[0]);
#pragma unroll
for (int i = 0; i < VecLength; i ++){
in_attn[i] = 0.0f;
}
max_val = row_max;
// print_val(blockIdx.x, threadIdx.x, max_val.data[0]);
// Second Run: Compute the sum
for (int i = threadIdx.x; i < nonzeros; i += BlockDim){
vexpsum<VecLength, LoadType>(in_attn, __ldg(values_v + i), max_val, scaler);
}
Array<VecLength> local_sum(in_attn);
Array<VecLength> sum_val = BlockReduce(temp_storage).Reduce(local_sum, ArraySumFunc<VecLength>());
if (threadIdx.x == 0) row_sum = sum_val;
__syncthreads();
sum_val = row_sum;
// print_val(blockIdx.x, threadIdx.x, sum_val.data[0]);
// Last Run: Do softmax
for (int i = threadIdx.x; i < nonzeros; i += BlockDim){
vsoftmax<VecLength, LoadType>(__ldg(values_v + i), max_val, sum_val, attn_v + i, scaler);
}
}
template <int VecLength, typename LoadType, int BlockDim>
__global__ void csrSoftmaxKernel(
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const half* __restrict__ values,
half* __restrict__ attn,
const float scaler)
{
csrSoftmaxKernel_<VecLength, LoadType, BlockDim>(
row_indices, row_offsets, values, attn, scaler
);
}
template <int VecLength, typename LoadType, int BlockDim>
__global__ void batchedCsrSoftmaxKernel(
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const half* __restrict__ values_b,
int values_stride,
half* __restrict__ attn_b,
int attn_stride,
const float scaler)
{
int entry_idx = blockIdx.y;
const half* values = values_b + values_stride * entry_idx;
half* attn = attn_b + attn_stride * entry_idx;
csrSoftmaxKernel_<VecLength, LoadType, BlockDim>(
row_indices, row_offsets, values, attn, scaler
);
}
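// Host entry point: launches one 128-thread block per sparse row (the batched variant
// below adds a second grid dimension over batch entries) and dispatches on vec_length
// (8, 4 or 2 half values per vectorized load) to the matching kernel instantiation.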
torch::Tensor csr_softmax_cuda(
torch::Tensor row_indices,
torch::Tensor row_offsets,
torch::Tensor values,
float scaler,
int vec_length)
{
int m = row_indices.size(0);
dim3 block, grid;
block.x = 128;
grid.x = m;
auto attn = torch::empty_like(values);
switch(vec_length){
case 8:
hipLaunchKernelGGL(( csrSoftmaxKernel<8, float4, 128>), dim3(grid), dim3(block), 0, 0,
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
reinterpret_cast<half *>(attn.data<torch::Half>()),
scaler
);
break;
case 4:
hipLaunchKernelGGL(( csrSoftmaxKernel<4, float2, 128>), dim3(grid), dim3(block), 0, 0,
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
reinterpret_cast<half *>(attn.data<torch::Half>()),
scaler
);
break;
case 2:
hipLaunchKernelGGL(( csrSoftmaxKernel<2, float, 128>), dim3(grid), dim3(block), 0, 0,
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
reinterpret_cast<half *>(attn.data<torch::Half>()),
scaler
);
break;
}
return attn;
}
torch::Tensor batched_csr_softmax_cuda(
torch::Tensor row_indices,
torch::Tensor row_offsets,
torch::Tensor values,
float scaler,
int vec_length,
int batch_size)
{
int m = row_indices.size(0);
dim3 block, grid;
block.x = 128;
grid.x = m;
grid.y = batch_size;
auto attn = torch::empty_like(values);
int stride = values.numel()/batch_size;
switch(vec_length){
case 8:
hipLaunchKernelGGL(( batchedCsrSoftmaxKernel<8, float4, 128>), dim3(grid), dim3(block), 0, 0,
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
stride,
reinterpret_cast<half *>(attn.data<torch::Half>()),
stride, scaler
);
break;
case 4:
hipLaunchKernelGGL(( batchedCsrSoftmaxKernel<4, float2, 128>), dim3(grid), dim3(block), 0, 0,
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
stride,
reinterpret_cast<half *>(attn.data<torch::Half>()),
stride,
scaler
);
break;
case 2:
hipLaunchKernelGGL(( batchedCsrSoftmaxKernel<2, float, 128>), dim3(grid), dim3(block), 0, 0,
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
stride,
reinterpret_cast<half *>(attn.data<torch::Half>()),
stride,
scaler
);
break;
}
return attn;
} | be39119b7da9eea3ef4947680f5e5a472874cde8.cu | #include <torch/extension.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <vector>
#include <cub/cub.cuh>
#include <cuda_fp16.h>
#include <stdio.h>
__device__ void print_val(int blockid, int threadid, float value){
if (blockid == 0 && threadid == 0) printf("tid: %d, value is: %.8f\n", threadid, float(value));
}
template <int Len>
struct Array {
__device__ __forceinline__ Array(){}
__device__ __forceinline__ Array(float* inputs){
#pragma unroll
for (int i = 0; i < Len; i++){
data[i] = inputs[i];
}
}
float data[Len];
};
template <int Len>
struct ArrayMaxFunc{
__device__ __forceinline__ Array<Len> operator()(
const Array<Len>& p1, const Array<Len>& p2)
{
Array<Len> result;
#pragma unroll
for (int i = 0; i < Len; i ++){
result.data[i] = p1.data[i] > p2.data[i] ? p1.data[i] : p2.data[i];
}
return result;
}
};
template <int Len>
struct ArraySumFunc{
__device__ __forceinline__ Array<Len> operator()(
const Array<Len>& p1, const Array<Len>& p2)
{
Array<Len> result;
#pragma unroll
for (int i = 0; i < Len; i ++){
result.data[i] = p1.data[i] + p2.data[i];
}
return result;
}
};
template <int VecLength, typename LoadType>
__device__ __forceinline__ void vmax(float* a, LoadType b, const float scaler)
{
half* b_h = reinterpret_cast<half*>(&b);
#pragma unroll
for (int i = 0; i < VecLength; i++){
a[i] = a[i] > __half2float(b_h[i]) * scaler ? a[i] : __half2float(b_h[i]) * scaler;
}
}
template <int VecLength, typename LoadType>
__device__ __forceinline__ void vexpsum(float* a, LoadType b, Array<VecLength> max, const float scaler)
{
half* b_h = reinterpret_cast<half*>(&b);
#pragma unroll
for (int i = 0; i < VecLength; i++){
a[i] += __expf(__half2float(b_h[i]) * scaler - max.data[i]);
}
}
template <int VecLength, typename LoadType>
__device__ __forceinline__ void vsoftmax(LoadType a, Array<VecLength> max, Array<VecLength> sum, LoadType* out, const float scaler)
{
half* a_h = reinterpret_cast<half*>(&a);
half out_reg[VecLength];
#pragma unroll
for (int i = 0; i < VecLength; i++){
out_reg[i] = __float2half(__expf(__half2float(a_h[i]) * scaler - max.data[i]) / sum.data[i]);
}
LoadType* out_reg_v = reinterpret_cast<LoadType*>(out_reg);
*(out) = *(out_reg_v);
}
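// One thread block processes one CSR row (selected via row_indices/row_offsets):
// pass 1 finds the per-lane row maximum, pass 2 accumulates the sum of exponentials,
// and pass 3 writes the normalized softmax values back with vectorized stores.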
template <int VecLength, typename LoadType, int BlockDim>
__device__ void csrSoftmaxKernel_(
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const half* __restrict__ values,
half* __restrict__ attn,
const float scaler)
{
int m_index = blockIdx.x;
m_index = __ldg(row_indices + m_index);
int row_offset = __ldg(row_offsets + m_index);
int nonzeros = __ldg(row_offsets + m_index + 1) - row_offset;
// Specialized BlockReduce for a 1D block on type half
typedef cub::BlockReduce<Array<VecLength>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ Array<VecLength> row_max;
__shared__ Array<VecLength> row_sum;
// Private register file that holds the loaded data
float in_attn[VecLength] = {-1e+10};
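    // Note: this aggregate initialization only sets the first lane to -1e+10; the
    // remaining VecLength-1 lanes start at 0.0f before the running-max pass below.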
// Pointer to the input attention weight
const LoadType* values_v = reinterpret_cast<const LoadType *>(values) + row_offset;
LoadType* attn_v = reinterpret_cast<LoadType *>(attn) + row_offset;
// First Run: get the maximum number of the current row
for (int i = threadIdx.x; i < nonzeros; i += BlockDim){
// Load data to register file
vmax<VecLength, LoadType>(in_attn, __ldg(values_v + i), scaler);
}
Array<VecLength> local_max(in_attn);
Array<VecLength> max_val = BlockReduce(temp_storage).Reduce(local_max, ArrayMaxFunc<VecLength>());
if (threadIdx.x == 0) row_max = max_val;
__syncthreads();
// print_val(blockIdx.x, threadIdx.x, row_max.data[0]);
#pragma unroll
for (int i = 0; i < VecLength; i ++){
in_attn[i] = 0.0f;
}
max_val = row_max;
// print_val(blockIdx.x, threadIdx.x, max_val.data[0]);
// Second Run: Compute the sum
for (int i = threadIdx.x; i < nonzeros; i += BlockDim){
vexpsum<VecLength, LoadType>(in_attn, __ldg(values_v + i), max_val, scaler);
}
Array<VecLength> local_sum(in_attn);
Array<VecLength> sum_val = BlockReduce(temp_storage).Reduce(local_sum, ArraySumFunc<VecLength>());
if (threadIdx.x == 0) row_sum = sum_val;
__syncthreads();
sum_val = row_sum;
// print_val(blockIdx.x, threadIdx.x, sum_val.data[0]);
// Last Run: Do softmax
for (int i = threadIdx.x; i < nonzeros; i += BlockDim){
vsoftmax<VecLength, LoadType>(__ldg(values_v + i), max_val, sum_val, attn_v + i, scaler);
}
}
template <int VecLength, typename LoadType, int BlockDim>
__global__ void csrSoftmaxKernel(
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const half* __restrict__ values,
half* __restrict__ attn,
const float scaler)
{
csrSoftmaxKernel_<VecLength, LoadType, BlockDim>(
row_indices, row_offsets, values, attn, scaler
);
}
template <int VecLength, typename LoadType, int BlockDim>
__global__ void batchedCsrSoftmaxKernel(
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const half* __restrict__ values_b,
int values_stride,
half* __restrict__ attn_b,
int attn_stride,
const float scaler)
{
int entry_idx = blockIdx.y;
const half* values = values_b + values_stride * entry_idx;
half* attn = attn_b + attn_stride * entry_idx;
csrSoftmaxKernel_<VecLength, LoadType, BlockDim>(
row_indices, row_offsets, values, attn, scaler
);
}
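// Host entry point: launches one 128-thread block per sparse row (the batched variant
// below adds a second grid dimension over batch entries) and dispatches on vec_length
// (8, 4 or 2 half values per vectorized load) to the matching kernel instantiation.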
torch::Tensor csr_softmax_cuda(
torch::Tensor row_indices,
torch::Tensor row_offsets,
torch::Tensor values,
float scaler,
int vec_length)
{
int m = row_indices.size(0);
dim3 block, grid;
block.x = 128;
grid.x = m;
auto attn = torch::empty_like(values);
switch(vec_length){
case 8:
csrSoftmaxKernel<8, float4, 128><<<grid, block>>>(
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
reinterpret_cast<half *>(attn.data<torch::Half>()),
scaler
);
break;
case 4:
csrSoftmaxKernel<4, float2, 128><<<grid, block>>>(
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
reinterpret_cast<half *>(attn.data<torch::Half>()),
scaler
);
break;
case 2:
csrSoftmaxKernel<2, float, 128><<<grid, block>>>(
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
reinterpret_cast<half *>(attn.data<torch::Half>()),
scaler
);
break;
}
return attn;
}
torch::Tensor batched_csr_softmax_cuda(
torch::Tensor row_indices,
torch::Tensor row_offsets,
torch::Tensor values,
float scaler,
int vec_length,
int batch_size)
{
int m = row_indices.size(0);
dim3 block, grid;
block.x = 128;
grid.x = m;
grid.y = batch_size;
auto attn = torch::empty_like(values);
int stride = values.numel()/batch_size;
switch(vec_length){
case 8:
batchedCsrSoftmaxKernel<8, float4, 128><<<grid, block>>>(
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
stride,
reinterpret_cast<half *>(attn.data<torch::Half>()),
stride, scaler
);
break;
case 4:
batchedCsrSoftmaxKernel<4, float2, 128><<<grid, block>>>(
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
stride,
reinterpret_cast<half *>(attn.data<torch::Half>()),
stride,
scaler
);
break;
case 2:
batchedCsrSoftmaxKernel<2, float, 128><<<grid, block>>>(
row_indices.data<int>(), row_offsets.data<int>(),
reinterpret_cast<half *>(values.data<torch::Half>()),
stride,
reinterpret_cast<half *>(attn.data<torch::Half>()),
stride,
scaler
);
break;
}
return attn;
} |
b1fa8b354109718bbd4a52a72299606d2ff9b99a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Cortex.cuh"
#include <iostream>
#include "sm_60_atomic_functions.h"
#include "CUDAHelper.cuh"
__global__ void cort_image_kernel(double *d_img, double *d_img_vector, SamplingPoint *d_fields,
uint2 cortImgSize, size_t locSize, size_t vecLen, bool rgb) {
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (locSize <= globalIndex)
return;
int channel = globalIndex / (locSize / (rgb ? 3 : 1));
int offset = channel * cortImgSize.x * cortImgSize.y;
int index = globalIndex % (locSize / (rgb ? 3 : 1));
int vecOffset = channel * vecLen;
SamplingPoint *point = &d_fields[index];
int kernelSize = point->_kernelSize;
double *kernel = point->d_kernel;
int X = point->_x - (float)kernelSize/2.0 + 0.5;
int Y = point->_y - (float)kernelSize/2.0 + 0.5;
double value = d_img_vector[vecOffset + d_fields[index]._i];
for (int i = 0; i != kernelSize; ++i) {
for (int j = 0; j != kernelSize; ++j) {
if (X + j >= 0 && Y + i >= 0 && X + j < cortImgSize.x && Y + i < cortImgSize.y)
atomicAdd(&d_img[offset + (Y + i) * cortImgSize.x + X + j], value * kernel[i * kernelSize + j]);
}
}
}
__global__ void normalise(uchar *d_norm, double *d_image, float *normaliser, size_t size, bool rgb) {
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (size <= globalIndex)
return;
int index = globalIndex % (size / (rgb ? 3 : 1));
d_norm[globalIndex] = normaliser[index] == 0.0 ? 0 : (int)(d_image[globalIndex] / normaliser[index]);
}
template <class T>
void setPointerToNull(T **d_ptr) {
if (*d_ptr != nullptr){
hipFree(*d_ptr);
cudaCheckErrors("ERROR");
*d_ptr = nullptr;
}
}
Cortex::~Cortex() {
removeCortexFields(&d_leftFields, _leftCortexSize);
removeCortexFields(&d_rightFields, _rightCortexSize);
setPointerToNull(&d_leftNorm);
setPointerToNull(&d_rightNorm);
}
int Cortex::cortImage(double *h_imageVector, size_t vecLen, float **d_norm, uchar *h_result,
size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector,
SamplingPoint *d_fields, size_t locSize) {
if (!isReady())
return ERRORS::uninitialized;
if ((h_imageVector == nullptr && d_imageVector == nullptr) || h_result == nullptr)
return ERRORS::invalidArguments;
if (cortImgX != _cortImgSize.x || cortImgY != _cortImgSize.y || rgb != _rgb ||
vecLen != _channels * (_leftCortexSize + _rightCortexSize))
return ERRORS::imageParametersDidNotMatch;
double *d_img;
hipMalloc((void**)&d_img, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double));
hipMemset(d_img, 0.0, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double));
double *_d_imageVector;
if (d_imageVector != nullptr)
_d_imageVector = d_imageVector;
else {
hipMalloc((void**)&_d_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double));
hipMemcpy(_d_imageVector, h_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double), hipMemcpyHostToDevice);
}
hipLaunchKernelGGL(( cort_image_kernel), dim3(ceil(_channels * locSize / 512.0)), dim3(512), 0, 0,
d_img, _d_imageVector, d_fields, _cortImgSize, _channels * locSize,
_leftCortexSize + _rightCortexSize, _rgb);
hipDeviceSynchronize();
cudaCheckErrors("ERROR");
uchar *d_normalised;
hipMalloc((void**)&d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar));
hipLaunchKernelGGL(( normalise), dim3(ceil(_channels * _cortImgSize.x * _cortImgSize.y / 512.0)), dim3(512), 0, 0,
d_normalised, d_img, *d_norm, _channels * _cortImgSize.x * _cortImgSize.y, rgb);
hipDeviceSynchronize();
cudaCheckErrors("ERROR");
hipMemcpy(h_result, d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar), hipMemcpyDeviceToHost);
cudaCheckErrors("ERROR");
hipFree(d_normalised);
if (d_imageVector == nullptr)
hipFree(_d_imageVector);
hipFree(d_img);
return 0;
}
int Cortex::cortImageLeft(double *h_imageVector, size_t vecLen, uchar *h_result,
size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) {
return cortImage(h_imageVector, vecLen, &d_leftNorm, h_result, cortImgX, cortImgY, rgb,
d_imageVector, d_leftFields, _leftCortexSize);
}
int Cortex::cortImageRight(double *h_imageVector, size_t vecLen, uchar *h_result,
size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) {
return cortImage(h_imageVector, vecLen, &d_rightNorm, h_result, cortImgX, cortImgY, rgb,
d_imageVector, d_rightFields, _rightCortexSize);
}
bool Cortex::isReady() const {
return _leftCortexSize != 0 && _rightCortexSize != 0 &&
d_leftFields != nullptr && d_rightFields != nullptr &&
d_leftNorm != nullptr && d_rightNorm != nullptr &&
_cortImgSize.x != 0 && _cortImgSize.y != 0;
}
void Cortex::setRGB(const bool rgb) {
if (rgb == _rgb)
return;
_rgb = rgb;
_channels = _rgb ? 3 : 1;
}
void Cortex::setCortImageSize(uint2 cortImgSize) {
if (cortImgSize.x == _cortImgSize.x && cortImgSize.y == _cortImgSize.y)
return;
setPointerToNull(&d_leftNorm);
setPointerToNull(&d_rightNorm);
_cortImgSize = cortImgSize;
}
error Cortex::setLeftCortexFields(SamplingPoint *h_leftFields, size_t leftSize) {
return setCortexFields(h_leftFields, &d_leftFields, leftSize, _leftCortexSize);
}
error Cortex::setRightCortexFields(SamplingPoint *h_rightFields, size_t rightSize) {
return setCortexFields(h_rightFields, &d_rightFields, rightSize, _rightCortexSize);
}
error Cortex::setLeftNorm(const float *h_leftNorm, size_t leftNormSize) {
size_t tmp;
return setOnDevice(h_leftNorm, leftNormSize, &d_leftNorm, tmp);
}
error Cortex::setRightNorm(const float *h_rightNorm, size_t rightNormSize){
size_t tmp;
return setOnDevice(h_rightNorm, rightNormSize, &d_rightNorm, tmp);
}
error Cortex::setCortexFields(SamplingPoint *h_fields, SamplingPoint **d_fields, const size_t &h_size, size_t &d_size) {
if (h_fields == nullptr)
return ERRORS::invalidArguments;
removeCortexFields(d_fields, d_size);
for (int i = 0; i != h_size; ++i) {
h_fields[i].copyToDevice();
}
hipMalloc((void**)d_fields, sizeof(SamplingPoint) * h_size);
hipMemcpy(*d_fields, h_fields, sizeof(SamplingPoint) * h_size, hipMemcpyHostToDevice);
cudaCheckErrors("ERROR");
d_size = h_size;
return 0;
}
error Cortex::removeCortexFields(SamplingPoint **d_fields, size_t &d_size) {
if (*d_fields != nullptr) {
SamplingPoint *h_fields = (SamplingPoint*)malloc(sizeof(SamplingPoint) * d_size);
hipMemcpy(h_fields, *d_fields, sizeof(SamplingPoint) * d_size, hipMemcpyDeviceToHost);
for (int i = 0; i != d_size; ++i)
h_fields[i].removeFromDevice();
free(h_fields);
setPointerToNull(d_fields);
cudaCheckErrors("ERROR");
}
return 0;
}
template <class T>
error Cortex::getFromDevice(T *h_ptr, const size_t h_size, const T *d_ptr, const size_t d_size) const {
if (h_ptr == nullptr || h_size == 0)
return ERRORS::invalidArguments;
if (h_size != d_size)
return ERRORS::cortexSizeDidNotMatch;
if (d_ptr == nullptr)
return ERRORS::uninitialized;
hipMemcpy(h_ptr, d_ptr, sizeof(T) * d_size, hipMemcpyDeviceToHost);
cudaCheckErrors("ERROR");
return 0;
}
template <class T>
error Cortex::setOnDevice(const T *h_ptr, size_t h_size, T **d_ptr, size_t &d_size) {
if (h_ptr == nullptr || h_size == 0)
return ERRORS::invalidArguments;
setPointerToNull(d_ptr);
hipMalloc((void**)d_ptr, sizeof(T) * h_size);
hipMemcpy(*d_ptr, h_ptr, sizeof(T) * h_size, hipMemcpyHostToDevice);
d_size = h_size;
cudaCheckErrors("ERROR");
return 0;
}
| b1fa8b354109718bbd4a52a72299606d2ff9b99a.cu | #include "Cortex.cuh"
#include <iostream>
#include "sm_60_atomic_functions.h"
#include "CUDAHelper.cuh"
__global__ void cort_image_kernel(double *d_img, double *d_img_vector, SamplingPoint *d_fields,
uint2 cortImgSize, size_t locSize, size_t vecLen, bool rgb) {
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (locSize <= globalIndex)
return;
int channel = globalIndex / (locSize / (rgb ? 3 : 1));
int offset = channel * cortImgSize.x * cortImgSize.y;
int index = globalIndex % (locSize / (rgb ? 3 : 1));
int vecOffset = channel * vecLen;
SamplingPoint *point = &d_fields[index];
int kernelSize = point->_kernelSize;
double *kernel = point->d_kernel;
int X = point->_x - (float)kernelSize/2.0 + 0.5;
int Y = point->_y - (float)kernelSize/2.0 + 0.5;
double value = d_img_vector[vecOffset + d_fields[index]._i];
for (int i = 0; i != kernelSize; ++i) {
for (int j = 0; j != kernelSize; ++j) {
if (X + j >= 0 && Y + i >= 0 && X + j < cortImgSize.x && Y + i < cortImgSize.y)
atomicAdd(&d_img[offset + (Y + i) * cortImgSize.x + X + j], value * kernel[i * kernelSize + j]);
}
}
}
__global__ void normalise(uchar *d_norm, double *d_image, float *normaliser, size_t size, bool rgb) {
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (size <= globalIndex)
return;
int index = globalIndex % (size / (rgb ? 3 : 1));
d_norm[globalIndex] = normaliser[index] == 0.0 ? 0 : (int)(d_image[globalIndex] / normaliser[index]);
}
template <class T>
void setPointerToNull(T **d_ptr) {
if (*d_ptr != nullptr){
cudaFree(*d_ptr);
cudaCheckErrors("ERROR");
*d_ptr = nullptr;
}
}
Cortex::~Cortex() {
removeCortexFields(&d_leftFields, _leftCortexSize);
removeCortexFields(&d_rightFields, _rightCortexSize);
setPointerToNull(&d_leftNorm);
setPointerToNull(&d_rightNorm);
}
int Cortex::cortImage(double *h_imageVector, size_t vecLen, float **d_norm, uchar *h_result,
size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector,
SamplingPoint *d_fields, size_t locSize) {
if (!isReady())
return ERRORS::uninitialized;
if ((h_imageVector == nullptr && d_imageVector == nullptr) || h_result == nullptr)
return ERRORS::invalidArguments;
if (cortImgX != _cortImgSize.x || cortImgY != _cortImgSize.y || rgb != _rgb ||
vecLen != _channels * (_leftCortexSize + _rightCortexSize))
return ERRORS::imageParametersDidNotMatch;
double *d_img;
cudaMalloc((void**)&d_img, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double));
cudaMemset(d_img, 0.0, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double));
double *_d_imageVector;
if (d_imageVector != nullptr)
_d_imageVector = d_imageVector;
else {
cudaMalloc((void**)&_d_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double));
cudaMemcpy(_d_imageVector, h_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double), cudaMemcpyHostToDevice);
}
cort_image_kernel<<<ceil(_channels * locSize / 512.0), 512>>>(
d_img, _d_imageVector, d_fields, _cortImgSize, _channels * locSize,
_leftCortexSize + _rightCortexSize, _rgb);
cudaDeviceSynchronize();
cudaCheckErrors("ERROR");
uchar *d_normalised;
cudaMalloc((void**)&d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar));
normalise<<<ceil(_channels * _cortImgSize.x * _cortImgSize.y / 512.0), 512>>>(
d_normalised, d_img, *d_norm, _channels * _cortImgSize.x * _cortImgSize.y, rgb);
cudaDeviceSynchronize();
cudaCheckErrors("ERROR");
cudaMemcpy(h_result, d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar), cudaMemcpyDeviceToHost);
cudaCheckErrors("ERROR");
cudaFree(d_normalised);
if (d_imageVector == nullptr)
cudaFree(_d_imageVector);
cudaFree(d_img);
return 0;
}
int Cortex::cortImageLeft(double *h_imageVector, size_t vecLen, uchar *h_result,
size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) {
return cortImage(h_imageVector, vecLen, &d_leftNorm, h_result, cortImgX, cortImgY, rgb,
d_imageVector, d_leftFields, _leftCortexSize);
}
int Cortex::cortImageRight(double *h_imageVector, size_t vecLen, uchar *h_result,
size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) {
return cortImage(h_imageVector, vecLen, &d_rightNorm, h_result, cortImgX, cortImgY, rgb,
d_imageVector, d_rightFields, _rightCortexSize);
}
bool Cortex::isReady() const {
return _leftCortexSize != 0 && _rightCortexSize != 0 &&
d_leftFields != nullptr && d_rightFields != nullptr &&
d_leftNorm != nullptr && d_rightNorm != nullptr &&
_cortImgSize.x != 0 && _cortImgSize.y != 0;
}
void Cortex::setRGB(const bool rgb) {
if (rgb == _rgb)
return;
_rgb = rgb;
_channels = _rgb ? 3 : 1;
}
void Cortex::setCortImageSize(uint2 cortImgSize) {
if (cortImgSize.x == _cortImgSize.x && cortImgSize.y == _cortImgSize.y)
return;
setPointerToNull(&d_leftNorm);
setPointerToNull(&d_rightNorm);
_cortImgSize = cortImgSize;
}
error Cortex::setLeftCortexFields(SamplingPoint *h_leftFields, size_t leftSize) {
return setCortexFields(h_leftFields, &d_leftFields, leftSize, _leftCortexSize);
}
error Cortex::setRightCortexFields(SamplingPoint *h_rightFields, size_t rightSize) {
return setCortexFields(h_rightFields, &d_rightFields, rightSize, _rightCortexSize);
}
error Cortex::setLeftNorm(const float *h_leftNorm, size_t leftNormSize) {
size_t tmp;
return setOnDevice(h_leftNorm, leftNormSize, &d_leftNorm, tmp);
}
error Cortex::setRightNorm(const float *h_rightNorm, size_t rightNormSize){
size_t tmp;
return setOnDevice(h_rightNorm, rightNormSize, &d_rightNorm, tmp);
}
error Cortex::setCortexFields(SamplingPoint *h_fields, SamplingPoint **d_fields, const size_t &h_size, size_t &d_size) {
if (h_fields == nullptr)
return ERRORS::invalidArguments;
removeCortexFields(d_fields, d_size);
for (int i = 0; i != h_size; ++i) {
h_fields[i].copyToDevice();
}
cudaMalloc((void**)d_fields, sizeof(SamplingPoint) * h_size);
cudaMemcpy(*d_fields, h_fields, sizeof(SamplingPoint) * h_size, cudaMemcpyHostToDevice);
cudaCheckErrors("ERROR");
d_size = h_size;
return 0;
}
error Cortex::removeCortexFields(SamplingPoint **d_fields, size_t &d_size) {
if (*d_fields != nullptr) {
SamplingPoint *h_fields = (SamplingPoint*)malloc(sizeof(SamplingPoint) * d_size);
cudaMemcpy(h_fields, *d_fields, sizeof(SamplingPoint) * d_size, cudaMemcpyDeviceToHost);
for (int i = 0; i != d_size; ++i)
h_fields[i].removeFromDevice();
free(h_fields);
setPointerToNull(d_fields);
cudaCheckErrors("ERROR");
}
return 0;
}
template <class T>
error Cortex::getFromDevice(T *h_ptr, const size_t h_size, const T *d_ptr, const size_t d_size) const {
if (h_ptr == nullptr || h_size == 0)
return ERRORS::invalidArguments;
if (h_size != d_size)
return ERRORS::cortexSizeDidNotMatch;
if (d_ptr == nullptr)
return ERRORS::uninitialized;
cudaMemcpy(h_ptr, d_ptr, sizeof(T) * d_size, cudaMemcpyDeviceToHost);
cudaCheckErrors("ERROR");
return 0;
}
template <class T>
error Cortex::setOnDevice(const T *h_ptr, size_t h_size, T **d_ptr, size_t &d_size) {
if (h_ptr == nullptr || h_size == 0)
return ERRORS::invalidArguments;
setPointerToNull(d_ptr);
cudaMalloc((void**)d_ptr, sizeof(T) * h_size);
cudaMemcpy(*d_ptr, h_ptr, sizeof(T) * h_size, cudaMemcpyHostToDevice);
d_size = h_size;
cudaCheckErrors("ERROR");
return 0;
}
|
f23c6c0ee881d4025fcb16695853a7357636c3c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_kernel.cuh"
__global__ void dequantization_kernel(context* ctx, uint8_t *input_depth, float *output_depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * blockDim.x * gridDim.x;
if(input_depth[idx] == 0) {
output_depth[idx] = 0;
return;
}
float maxdisp = fB / ctx->min_depth;
float mindisp = fB / ctx->max_depth;
output_depth[idx] = (float)((float)input_depth[idx] / 255.f * (maxdisp - mindisp) + mindisp);
output_depth[idx] = (float)(fB / (float)output_depth[idx]);
}
__global__ void integrate_kernel(context* ctx)
{
int z_voxel = threadIdx.x + blockIdx.x * blockDim.x;
int y_voxel = threadIdx.y + blockIdx.y * blockDim.y;
float world_x, world_y, world_z;
float camera_x, camera_y, camera_z;
int pix_x, pix_y;
float old_r, old_g, old_b;
float new_r, new_g, new_b;
float weight;
__shared__ float tsdf[32][32];
__shared__ uint8_t color[32][32][3];
// each cuda thread handles one volume line(x axis)
for(int x_voxel = 0; x_voxel < ctx->resolution[2]; x_voxel++) {
weight = 0;
tsdf[threadIdx.x][threadIdx.y] = 1;
color[threadIdx.x][threadIdx.y][0] = 0;
color[threadIdx.x][threadIdx.y][1] = 0;
color[threadIdx.x][threadIdx.y][2] = 0;
int voxel_idx = z_voxel * DIM_Y * DIM_X + y_voxel * DIM_X + x_voxel;
// for each voxel, loop for all views
for(int cam_idx = 0; cam_idx < CAM_NUM; cam_idx++) {
float fx = ctx->krt[cam_idx].fx;
float fy = ctx->krt[cam_idx].fy;
float cx = ctx->krt[cam_idx].cx;
float cy = ctx->krt[cam_idx].cy;
float* R = ctx->krt[cam_idx].R;
float* T = ctx->krt[cam_idx].T;
// convert voxel index to world points position
world_x = world_x0 + x_voxel * ctx->voxel_size;
world_y = world_y0 + y_voxel * ctx->voxel_size;
world_z = world_z0 + z_voxel * ctx->voxel_size;
// convert point from world to camera coordinate
world_x -= T[0];
world_y -= T[1];
world_z -= T[2];
camera_x = R[0] * world_x + R[1] * world_y + R[2] * world_z;
camera_y = R[3] * world_x + R[4] * world_y + R[5] * world_z;
camera_z = R[6] * world_x + R[7] * world_y + R[8] * world_z;
if(camera_z <= 0) {
continue;
}
            // convert point from camera to pixel coordinate
pix_x = roundf(fx * camera_x / camera_z + cx);
pix_y = roundf(fy * camera_y / camera_z + cy);
int pix_idx = pix_y * WIDTH + pix_x;
if(pix_x < 0 || pix_x >= WIDTH || pix_y < 0 || pix_y >= HEIGHT) {
continue;
}
float depth_value = ctx->depth[WIDTH * HEIGHT * cam_idx + pix_idx];
new_r = ctx->in_buf_color[WIDTH * HEIGHT * 3 * cam_idx + 3 * pix_idx + 0];
new_g = ctx->in_buf_color[WIDTH * HEIGHT * 3 * cam_idx + 3 * pix_idx + 1];
new_b = ctx->in_buf_color[WIDTH * HEIGHT * 3 * cam_idx + 3 * pix_idx + 2];
if(depth_value == 0 || new_r == 0 || new_g == 0 || new_b == 0) {
continue;
}
float diff = depth_value - camera_z;
if (diff <= -ctx->trunc_margin) {
continue;
}
float dist = fmin(1.0f, diff / ctx->trunc_margin);
// update TSDF and weight
tsdf[threadIdx.x][threadIdx.y] = (tsdf[threadIdx.x][threadIdx.y] * weight + dist) / (weight + 1.0f);
// ctx->tsdf_voxel[voxel_idx] = (ctx->tsdf_voxel[voxel_idx] * weight + dist) / (weight + 1.0f);
weight += 1.0f;
// update color
old_r = color[threadIdx.x][threadIdx.y][0];
old_g = color[threadIdx.x][threadIdx.y][1];
old_b = color[threadIdx.x][threadIdx.y][2];
color[threadIdx.x][threadIdx.y][0] = (uint8_t)fminf((float)(old_r * weight + new_r * 1.0f) / (weight + 1.0f), 255);
color[threadIdx.x][threadIdx.y][1] = (uint8_t)fminf((float)(old_g * weight + new_g * 1.0f) / (weight + 1.0f), 255);
color[threadIdx.x][threadIdx.y][2] = (uint8_t)fminf((float)(old_b * weight + new_b * 1.0f) / (weight + 1.0f), 255);
}
// copy tsdf and color from shared memory to global memory
if(weight < WEIGHT_THRESHOLD) {
ctx->tsdf_voxel[voxel_idx] = 2 * TSDF_THRESHOLD;
}
else {
ctx->tsdf_voxel[voxel_idx] = tsdf[threadIdx.x][threadIdx.y];
}
ctx->color_voxel[3 * voxel_idx + 0] = color[threadIdx.x][threadIdx.y][0];
ctx->color_voxel[3 * voxel_idx + 1] = color[threadIdx.x][threadIdx.y][1];
ctx->color_voxel[3 * voxel_idx + 2] = color[threadIdx.x][threadIdx.y][2];
}
}
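// Unprojects one camera's depth map into a world-space point cloud: each thread converts
// one pixel to camera coordinates via the intrinsics, then transforms it to world
// coordinates with the transposed rotation and the translation T.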
__global__ void depth_to_world_pcd(context* ctx, int cam_idx)
{
float fx = ctx->krt[cam_idx].fx;
float fy = ctx->krt[cam_idx].fy;
float cx = ctx->krt[cam_idx].cx;
float cy = ctx->krt[cam_idx].cy;
float* R = ctx->krt[cam_idx].R;
float* T = ctx->krt[cam_idx].T;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * blockDim.x * gridDim.x;
float depth_val = ctx->depth[idx];
if(depth_val == 0) {
return;
}
float camera_x = (x - cx) * depth_val / fx;
float camera_y = (y - cy) * depth_val / fy;
float camera_z = depth_val;
float world_x = R[0] * camera_x + R[3] * camera_y + R[6] * camera_z + T[0];
float world_y = R[1] * camera_x + R[4] * camera_y + R[7] * camera_z + T[1];
float world_z = R[2] * camera_x + R[5] * camera_y + R[8] * camera_z + T[2];
ctx->pcd[3 * idx + 0] = world_x;
ctx->pcd[3 * idx + 1] = world_y;
ctx->pcd[3 * idx + 2] = world_z;
}
| f23c6c0ee881d4025fcb16695853a7357636c3c7.cu | #include "cuda_kernel.cuh"
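// Dequantizes an 8-bit disparity map back to metric depth using the per-context
// min/max depth range and the fB scale constant.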
__global__ void dequantization_kernel(context* ctx, uint8_t *input_depth, float *output_depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * blockDim.x * gridDim.x;
if(input_depth[idx] == 0) {
output_depth[idx] = 0;
return;
}
float maxdisp = fB / ctx->min_depth;
float mindisp = fB / ctx->max_depth;
output_depth[idx] = (float)((float)input_depth[idx] / 255.f * (maxdisp - mindisp) + mindisp);
output_depth[idx] = (float)(fB / (float)output_depth[idx]);
}
__global__ void integrate_kernel(context* ctx)
{
int z_voxel = threadIdx.x + blockIdx.x * blockDim.x;
int y_voxel = threadIdx.y + blockIdx.y * blockDim.y;
float world_x, world_y, world_z;
float camera_x, camera_y, camera_z;
int pix_x, pix_y;
float old_r, old_g, old_b;
float new_r, new_g, new_b;
float weight;
__shared__ float tsdf[32][32];
__shared__ uint8_t color[32][32][3];
// each cuda thread handles one volume line(x axis)
for(int x_voxel = 0; x_voxel < ctx->resolution[2]; x_voxel++) {
weight = 0;
tsdf[threadIdx.x][threadIdx.y] = 1;
color[threadIdx.x][threadIdx.y][0] = 0;
color[threadIdx.x][threadIdx.y][1] = 0;
color[threadIdx.x][threadIdx.y][2] = 0;
int voxel_idx = z_voxel * DIM_Y * DIM_X + y_voxel * DIM_X + x_voxel;
// for each voxel, loop for all views
for(int cam_idx = 0; cam_idx < CAM_NUM; cam_idx++) {
float fx = ctx->krt[cam_idx].fx;
float fy = ctx->krt[cam_idx].fy;
float cx = ctx->krt[cam_idx].cx;
float cy = ctx->krt[cam_idx].cy;
float* R = ctx->krt[cam_idx].R;
float* T = ctx->krt[cam_idx].T;
// convert voxel index to world points position
world_x = world_x0 + x_voxel * ctx->voxel_size;
world_y = world_y0 + y_voxel * ctx->voxel_size;
world_z = world_z0 + z_voxel * ctx->voxel_size;
// convert point from world to camera coordinate
world_x -= T[0];
world_y -= T[1];
world_z -= T[2];
camera_x = R[0] * world_x + R[1] * world_y + R[2] * world_z;
camera_y = R[3] * world_x + R[4] * world_y + R[5] * world_z;
camera_z = R[6] * world_x + R[7] * world_y + R[8] * world_z;
if(camera_z <= 0) {
continue;
}
            // convert point from camera to pixel coordinate
pix_x = roundf(fx * camera_x / camera_z + cx);
pix_y = roundf(fy * camera_y / camera_z + cy);
int pix_idx = pix_y * WIDTH + pix_x;
if(pix_x < 0 || pix_x >= WIDTH || pix_y < 0 || pix_y >= HEIGHT) {
continue;
}
float depth_value = ctx->depth[WIDTH * HEIGHT * cam_idx + pix_idx];
new_r = ctx->in_buf_color[WIDTH * HEIGHT * 3 * cam_idx + 3 * pix_idx + 0];
new_g = ctx->in_buf_color[WIDTH * HEIGHT * 3 * cam_idx + 3 * pix_idx + 1];
new_b = ctx->in_buf_color[WIDTH * HEIGHT * 3 * cam_idx + 3 * pix_idx + 2];
if(depth_value == 0 || new_r == 0 || new_g == 0 || new_b == 0) {
continue;
}
float diff = depth_value - camera_z;
if (diff <= -ctx->trunc_margin) {
continue;
}
float dist = fmin(1.0f, diff / ctx->trunc_margin);
// update TSDF and weight
tsdf[threadIdx.x][threadIdx.y] = (tsdf[threadIdx.x][threadIdx.y] * weight + dist) / (weight + 1.0f);
// ctx->tsdf_voxel[voxel_idx] = (ctx->tsdf_voxel[voxel_idx] * weight + dist) / (weight + 1.0f);
weight += 1.0f;
// update color
old_r = color[threadIdx.x][threadIdx.y][0];
old_g = color[threadIdx.x][threadIdx.y][1];
old_b = color[threadIdx.x][threadIdx.y][2];
color[threadIdx.x][threadIdx.y][0] = (uint8_t)fminf((float)(old_r * weight + new_r * 1.0f) / (weight + 1.0f), 255);
color[threadIdx.x][threadIdx.y][1] = (uint8_t)fminf((float)(old_g * weight + new_g * 1.0f) / (weight + 1.0f), 255);
color[threadIdx.x][threadIdx.y][2] = (uint8_t)fminf((float)(old_b * weight + new_b * 1.0f) / (weight + 1.0f), 255);
}
// copy tsdf and color from shared memory to global memory
if(weight < WEIGHT_THRESHOLD) {
ctx->tsdf_voxel[voxel_idx] = 2 * TSDF_THRESHOLD;
}
else {
ctx->tsdf_voxel[voxel_idx] = tsdf[threadIdx.x][threadIdx.y];
}
ctx->color_voxel[3 * voxel_idx + 0] = color[threadIdx.x][threadIdx.y][0];
ctx->color_voxel[3 * voxel_idx + 1] = color[threadIdx.x][threadIdx.y][1];
ctx->color_voxel[3 * voxel_idx + 2] = color[threadIdx.x][threadIdx.y][2];
}
}
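// Unprojects one camera's depth map into a world-space point cloud: each thread converts
// one pixel to camera coordinates via the intrinsics, then transforms it to world
// coordinates with the transposed rotation and the translation T.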
__global__ void depth_to_world_pcd(context* ctx, int cam_idx)
{
float fx = ctx->krt[cam_idx].fx;
float fy = ctx->krt[cam_idx].fy;
float cx = ctx->krt[cam_idx].cx;
float cy = ctx->krt[cam_idx].cy;
float* R = ctx->krt[cam_idx].R;
float* T = ctx->krt[cam_idx].T;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * blockDim.x * gridDim.x;
float depth_val = ctx->depth[idx];
if(depth_val == 0) {
return;
}
float camera_x = (x - cx) * depth_val / fx;
float camera_y = (y - cy) * depth_val / fy;
float camera_z = depth_val;
float world_x = R[0] * camera_x + R[3] * camera_y + R[6] * camera_z + T[0];
float world_y = R[1] * camera_x + R[4] * camera_y + R[7] * camera_z + T[1];
float world_z = R[2] * camera_x + R[5] * camera_y + R[8] * camera_z + T[2];
ctx->pcd[3 * idx + 0] = world_x;
ctx->pcd[3 * idx + 1] = world_y;
ctx->pcd[3 * idx + 2] = world_z;
}
|
a310f72fa65fc67656124583c12f9e39d83fb416.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include<cstdlib>
#include<cstdio>
using namespace std;
template<class T>
void print(T a){
cout << a << endl;
}
//all kernels have the 'void' return type
//global --> 1) runs on the device 2) called from the host code
// nvcc separates src code into host and device components
// host functions processed by the standard host compiler
// device functions processed by nvcc
__global__ void mykernel(void){
return;
}
__global__ void addInt(int *a, int *b, int *c){
*c = *a + *b;
}
void testAddInt(){
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(double);//allocate space
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
a = 7, b = 2;
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addInt), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cout << c << endl;
}
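// Vector add with one single-thread block per element: blockIdx.x selects which
// element each block processes.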
__global__ void addIntBlks(double* a, double *b, double *c){
// __syncthreads();
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("BlockIdx.x: %d\n", blockIdx.x);
}
void testAddIntBlks(){
// size_t N = 1<<25;
size_t N = 10;
double *d_a, *d_b, *d_c;
size_t size = sizeof(double) * N;
double *a = (double *)malloc(size);
double *b = (double *)malloc(size);
double *c = (double *)malloc(size);
for (size_t i = 0; i < N; i++){
a[i] = drand48();
b[i] = drand48();
}
//allocate space
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addIntBlks), dim3(N), dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
srand(1);
cout << rand() << " " << rand() % N << endl;
size_t i = rand() % N;
cout << N << " " << i << endl;
cout << "a[" << i << "] + b[" << i << "] = " << a[i] + b[i] << endl;
cout << "c[" << i << "] =" << c[i] << endl;
double res = 0;
for (size_t i = 0; i < N; i++){
res += a[i] + b[i] - c[i];
}
cout << res << endl;
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
int main(){
cout << "hello world!" << endl;
print<string>(string("shabi"));
hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, );
testAddInt();
testAddIntBlks();
return 0;
} | a310f72fa65fc67656124583c12f9e39d83fb416.cu | #include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include<cstdlib>
#include<cstdio>
using namespace std;
template<class T>
void print(T a){
cout << a << endl;
}
//all kernels have the 'void' return type
//global --> 1) runs on the device 2) called from the host code
// nvcc separates src code into host and device components
// host functions processed by the standard host compiler
// device functions processed by nvcc
__global__ void mykernel(void){
return;
}
__global__ void addInt(int *a, int *b, int *c){
*c = *a + *b;
}
void testAddInt(){
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(double);//allocate space
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
a = 7, b = 2;
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
addInt<<<1, 1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cout << c << endl;
}
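// Vector add with one single-thread block per element: blockIdx.x selects which
// element each block processes.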
__global__ void addIntBlks(double* a, double *b, double *c){
// __syncthreads();
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("BlockIdx.x: %d\n", blockIdx.x);
}
void testAddIntBlks(){
// size_t N = 1<<25;
size_t N = 10;
double *d_a, *d_b, *d_c;
size_t size = sizeof(double) * N;
double *a = (double *)malloc(size);
double *b = (double *)malloc(size);
double *c = (double *)malloc(size);
for (size_t i = 0; i < N; i++){
a[i] = drand48();
b[i] = drand48();
}
//allocate space
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
addIntBlks<<<N, 1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
srand(1);
cout << rand() << " " << rand() % N << endl;
size_t i = rand() % N;
cout << N << " " << i << endl;
cout << "a[" << i << "] + b[" << i << "] = " << a[i] + b[i] << endl;
cout << "c[" << i << "] =" << c[i] << endl;
double res = 0;
for (size_t i = 0; i < N; i++){
res += a[i] + b[i] - c[i];
}
cout << res << endl;
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
int main(){
cout << "hello world!" << endl;
print<string>(string("shabi"));
mykernel<<<1,1>>>();
testAddInt();
testAddIntBlks();
return 0;
} |
ccf5318b280268a5bc3cc412885ccc67ec393fad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/ztrtri_diag_batched.cu normal z -> s, Tue Feb 9 16:05:39 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "magma_internal.h"
#include "strtri.cuh"
/**
Purpose
-------
STRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in strsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a REAL array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sblas3
********************************************************************/
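/*
    Illustrative usage sketch (editor's addition, not part of the original
    MAGMA source; dA_array, dinvA_array, n, ldda, batchCount and queue are
    assumed to be set up by the caller). It only shows how the routine
    declared below is typically driven for a batch of lower-triangular,
    non-unit-diagonal matrices:

        // per matrix, dinvA needs magma_roundup(n, NB) * NB floats (NB = 128)
        magmablas_strtri_diag_batched(
            MagmaLower, MagmaNonUnit, n,
            dA_array, ldda,
            dinvA_array,
            1,            // resetozero: clear the workspace first
            batchCount, queue );
*/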
extern "C" void
magmablas_strtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
float const * const *dA_array, magma_int_t ldda,
float **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_slaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_S_ZERO, MAGMA_S_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If cudaMemset were used here instead, the whole workspace of its
    // original allocation size would have to be cleared (which would require
    // passing dinvA_length as an extra input parameter); this was tested and
    // turned out to be slower. Clearing anything smaller than the largest
    // size computed by the high-level getrf_batched API would be a bug, which
    // is why magmablas_slaset_batched is used above.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( strtri_diag_lower_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
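        // Worked example (editor's note, illustrative only): for n = 256 with
        // IB = 16 and NB = 128, the loop below runs jb = 16, 32, 64 with
        // npages = 8, 4, 2 respectively, then exits because jb reaches NB.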
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_sgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_sgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_sgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_sgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( strtri_diag_upper_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_sgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_sgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_sgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_sgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
| ccf5318b280268a5bc3cc412885ccc67ec393fad.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/ztrtri_diag_batched.cu normal z -> s, Tue Feb 9 16:05:39 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "magma_internal.h"
#include "strtri.cuh"
/**
Purpose
-------
STRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in strsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a REAL array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_strtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
float const * const *dA_array, magma_int_t ldda,
float **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_slaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_S_ZERO, MAGMA_S_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If cudaMemset were used here instead, the whole workspace of its
    // original allocation size would have to be cleared (which would require
    // passing dinvA_length as an extra input parameter); this was tested and
    // turned out to be slower. Clearing anything smaller than the largest
    // size computed by the high-level getrf_batched API would be a bug, which
    // is why magmablas_slaset_batched is used above.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
strtri_diag_lower_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_sgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_sgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_sgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_sgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
strtri_diag_upper_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_sgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_sgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_sgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_sgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_sgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
b6ef4eec6d56508647c7ff09f04d125d8e9f2601.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC SCC Training - GPU Computing and Programming
// May 3, 2019
// Andreas Goetz ([email protected])
// CUDA program that performs 1D stencil operation in parallel on the GPU
//
#include<stdio.h>
// define vector length, stencil radius,
#define N (1024*1024*8l)
#define RADIUS 3
#define GRIDSIZE 128
#define BLOCKSIZE 256
// -------------------------------------------------------
// CUDA device function that performs 1D stencil operation
// -------------------------------------------------------
__global__ void stencil_1D(int *in, int *out, long dim){
__shared__ int temp[BLOCKSIZE + 2*RADIUS];
int lindex = threadIdx.x + RADIUS;
long gindex = threadIdx.x + blockDim.x * blockIdx.x;
int stride = gridDim.x * blockDim.x;
long left, right;
// Go through all data
  // Step all threads in a block together to avoid synchronization problems
while ( gindex < (dim + blockDim.x) ) {
// Read input elements into shared memory
temp[lindex] = 0;
if (gindex < dim)
temp[lindex] = in[gindex];
// Populate halos, set to zero if we are at the boundary
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = 0;
left = gindex - RADIUS;
if (left >= 0)
temp[lindex - RADIUS] = in[left];
temp[lindex + blockDim.x] = 0;
right = gindex + blockDim.x;
if (right < dim)
temp[lindex + blockDim.x] = in[right];
}
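    // (Editor's note) With the defaults RADIUS = 3 and BLOCKSIZE = 256, temp
    // holds 262 ints: threads 0..2 also fill the left halo temp[0..2] and the
    // right halo temp[259..261]; every thread writes its own temp[lindex].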
// Synchronize threads - make sure all data is available!
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lindex + offset];
}
// Store the result
if (gindex < dim)
out[gindex] = result;
// Update global index and quit if we are done
gindex += stride;
__syncthreads();
}
}
// ------------
// main program
// ------------
int main(void){
int *h_in, *h_out;
int *d_in, *d_out;
long size = N * sizeof(int);
int i, j, ij, result, err;
// allocate host memory
h_in = new int[N];
h_out = new int[N];
// initialize vector
for (i=0; i<N; i++){
// h_in[i] = i+1;
h_in[i] = 1;
}
// allocate device memory
hipMalloc((void **)&d_in, size);
hipMalloc((void **)&d_out, size);
// copy input data to device
hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice);
// Apply stencil by launching a sufficient number of blocks
printf("\n---------------------------\n");
printf("Launching 1D stencil kernel\n");
printf("---------------------------\n");
printf("Vector length = %ld (%ld MB)\n",N,N*4/1024/1024);
printf("Stencil radius = %d\n",RADIUS);
printf("Blocks = %d\n",GRIDSIZE);
printf("Threads per block = %d\n",BLOCKSIZE);
printf("Total threads = %d\n",GRIDSIZE*BLOCKSIZE);
hipLaunchKernelGGL(( stencil_1D), dim3(GRIDSIZE),dim3(BLOCKSIZE), 0, 0, d_in, d_out, N);
// copy results back to host
hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost);
// deallocate device memory
hipFree(d_in);
hipFree(d_out);
// check results
err = 0;
for (i=0; i<N; i++){
result = 0;
for (j=-RADIUS; j<=RADIUS; j++){
ij = i+j;
if (ij>=0 && ij<N)
result += h_in[ij];
}
if (h_out[i] != result) {
err++;
// printf("h_out[%d]=%d\n",i,h_out[i]);
}
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match CPU result.\n\n");
}
// deallocate host memory
  delete[] h_in;
  delete[] h_out;
return 0;
}
| b6ef4eec6d56508647c7ff09f04d125d8e9f2601.cu | // SDSC SCC Training - GPU Computing and Programming
// May 3, 2019
// Andreas Goetz ([email protected])
// CUDA program that performs 1D stencil operation in parallel on the GPU
//
#include<stdio.h>
// define vector length, stencil radius,
#define N (1024*1024*8l)
#define RADIUS 3
#define GRIDSIZE 128
#define BLOCKSIZE 256
// -------------------------------------------------------
// CUDA device function that performs 1D stencil operation
// -------------------------------------------------------
__global__ void stencil_1D(int *in, int *out, long dim){
__shared__ int temp[BLOCKSIZE + 2*RADIUS];
int lindex = threadIdx.x + RADIUS;
long gindex = threadIdx.x + blockDim.x * blockIdx.x;
int stride = gridDim.x * blockDim.x;
long left, right;
// Go through all data
  // Step all threads in a block together to avoid synchronization problems
while ( gindex < (dim + blockDim.x) ) {
// Read input elements into shared memory
temp[lindex] = 0;
if (gindex < dim)
temp[lindex] = in[gindex];
// Populate halos, set to zero if we are at the boundary
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = 0;
left = gindex - RADIUS;
if (left >= 0)
temp[lindex - RADIUS] = in[left];
temp[lindex + blockDim.x] = 0;
right = gindex + blockDim.x;
if (right < dim)
temp[lindex + blockDim.x] = in[right];
}
// Synchronize threads - make sure all data is available!
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lindex + offset];
}
// Store the result
if (gindex < dim)
out[gindex] = result;
// Update global index and quit if we are done
gindex += stride;
__syncthreads();
}
}
// ------------
// main program
// ------------
int main(void){
int *h_in, *h_out;
int *d_in, *d_out;
long size = N * sizeof(int);
int i, j, ij, result, err;
// allocate host memory
h_in = new int[N];
h_out = new int[N];
// initialize vector
for (i=0; i<N; i++){
// h_in[i] = i+1;
h_in[i] = 1;
}
// allocate device memory
cudaMalloc((void **)&d_in, size);
cudaMalloc((void **)&d_out, size);
// copy input data to device
cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
// Apply stencil by launching a sufficient number of blocks
printf("\n---------------------------\n");
printf("Launching 1D stencil kernel\n");
printf("---------------------------\n");
printf("Vector length = %ld (%ld MB)\n",N,N*4/1024/1024);
printf("Stencil radius = %d\n",RADIUS);
printf("Blocks = %d\n",GRIDSIZE);
printf("Threads per block = %d\n",BLOCKSIZE);
printf("Total threads = %d\n",GRIDSIZE*BLOCKSIZE);
stencil_1D<<<GRIDSIZE,BLOCKSIZE>>>(d_in, d_out, N);
// copy results back to host
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
// deallocate device memory
cudaFree(d_in);
cudaFree(d_out);
// check results
err = 0;
for (i=0; i<N; i++){
result = 0;
for (j=-RADIUS; j<=RADIUS; j++){
ij = i+j;
if (ij>=0 && ij<N)
result += h_in[ij];
}
if (h_out[i] != result) {
err++;
// printf("h_out[%d]=%d\n",i,h_out[i]);
}
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match CPU result.\n\n");
}
// deallocate host memory
  delete[] h_in;
  delete[] h_out;
return 0;
}
|
dd110d95266d18a761da2d71df550b69ffe107bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 10
__global__ void add( int *a, int *b, int *c ) {
int tid = threadIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, N * sizeof(int) ) ;
hipMalloc( (void**)&dev_b, N * sizeof(int) );
hipMalloc( (void**)&dev_c, N * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = i;
b[i] = 2 * i;
}
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice );
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
| dd110d95266d18a761da2d71df550b69ffe107bc.cu | #include <stdio.h>
#define N 10
__global__ void add( int *a, int *b, int *c ) {
int tid = threadIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, N * sizeof(int) ) ;
cudaMalloc( (void**)&dev_b, N * sizeof(int) );
cudaMalloc( (void**)&dev_c, N * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = i;
b[i] = 2 * i;
}
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int),
cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, N * sizeof(int),
cudaMemcpyHostToDevice );
add<<<1,N>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, N * sizeof(int),
cudaMemcpyDeviceToHost );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
0797d4551f567eebd5c2bd690b1041913ef64ebe.hip | // !!! This is a file automatically generated by hipify!!!
// See also GeantCudaUtils.cxx
#include "GeantCudaUtils.h"
#include "Geant/Propagator.h"
#include "Geant/Track.h"
#include "GeantConfig.h"
namespace geant {
inline namespace cuda {
void CoprocessorBrokerInitConstant()
{
double tolerance = 1e-7;
GEANT_CUDA_ERROR(
hipMemcpyToSymbol(device_constant::gTolerance, &(tolerance), sizeof(double), size_t(0), hipMemcpyHostToDevice));
}
} // cuda
} // Geant
| 0797d4551f567eebd5c2bd690b1041913ef64ebe.cu | // See also GeantCudaUtils.cxx
#include "GeantCudaUtils.h"
#include "Geant/Propagator.h"
#include "Geant/Track.h"
#include "GeantConfig.h"
namespace geant {
inline namespace cuda {
void CoprocessorBrokerInitConstant()
{
double tolerance = 1e-7;
GEANT_CUDA_ERROR(
cudaMemcpyToSymbol(device_constant::gTolerance, &(tolerance), sizeof(double), size_t(0), cudaMemcpyHostToDevice));
}
} // cuda
} // Geant
|
6208b0b1bbae62f871eff07f4bdee4fc528b43a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "span.h"
#include "common.h"
#include "gtest/gtest.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#define SPAN_ASSERT_TRUE(cond, status) \
if (!(cond)) { \
*(status) = -1; \
}
#define SPAN_ASSERT_FALSE(cond, status) \
if ((cond)) { \
*(status) = -1; \
}
namespace common {
__global__ void TestFromOtherKernel(Span<float> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
}
// Test converting different T
__global__ void TestFromOtherKernelConst(Span<float const, 16> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
}
TEST(GPUSpan, FromOther) {
thrust::host_vector<float> h_vec(16);
std::iota(h_vec.begin(), h_vec.end(), 0);
thrust::device_vector<float> d_vec(h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
// dynamic extent
{
Span<float> span(d_vec.data().get(), d_vec.size());
TestFromOtherKernel << <1, 16 >> > (span);
}
{
Span<float> span(d_vec.data().get(), d_vec.size());
TestFromOtherKernelConst << <1, 16 >> > (span);
}
// static extent
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
TestFromOtherKernel << <1, 16 >> > (span);
}
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
TestFromOtherKernelConst << <1, 16 >> > (span);
}
}
/**/
struct TestStatus {
private:
int* status_;
public:
TestStatus() {
cuda_handler(hipMalloc(&status_, sizeof(int)));
int h_status = 1;
cuda_handler(hipMemcpy(status_, &h_status,
sizeof(int), hipMemcpyHostToDevice));
}
~TestStatus() {
cuda_handler(hipFree(status_));
}
int Get() {
int h_status;
cuda_handler(hipMemcpy(&h_status, status_,
sizeof(int), hipMemcpyDeviceToHost));
return h_status;
}
int* Data() {
return status_;
}
};
struct TestTestStatus {
int* status_;
TestTestStatus(int* _status) : status_(_status) {}
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
SPAN_ASSERT_TRUE(false, status_);
}
};
struct TestAssignment {
int* status_;
TestAssignment(int* _status) : status_(_status) {}
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
Span<float> s1;
float arr[] = { 3, 4, 5 };
Span<const float> s2 = arr;
SPAN_ASSERT_TRUE(s2.size() == 3, status_);
SPAN_ASSERT_TRUE(s2.data() == &arr[0], status_);
s2 = s1;
SPAN_ASSERT_TRUE(s2.empty(), status_);
}
};
TEST(GPUSpan, Assignment) {
cuda_handler(hipSetDevice(0));
TestStatus status;
LaunchN(16, TestAssignment{ status.Data() });
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, TestStatus) {
cuda_handler(hipSetDevice(0));
TestStatus status;
LaunchN(16, TestTestStatus{ status.Data() });
ASSERT_EQ(status.Get(), -1);
}
template <typename Iter>
XGBOOST_DEVICE void InitializeRange(Iter _begin, Iter _end) {
float j = 0;
for (Iter i = _begin; i != _end; ++i, ++j) {
*i = j;
}
}
__global__ void TestFrontKernel(Span<float> _span) {
_span.front();
}
__global__ void TestBackKernel(Span<float> _span) {
_span.back();
}
TEST(GPUSpan, FrontBack) {
cuda_handler(hipSetDevice(0));
Span<float> s;
auto lambda_test_front = [=]() {
// make sure the termination happens inside this test.
try {
TestFrontKernel << <1, 1 >> > (s);
cuda_handler(hipDeviceSynchronize());
cuda_handler(hipGetLastError());
}
catch (std::runtime_error const& e) {
std::terminate();
}
};
EXPECT_DEATH(lambda_test_front(), "");
auto lambda_test_back = [=]() {
try {
TestBackKernel << <1, 1 >> > (s);
cuda_handler(hipDeviceSynchronize());
cuda_handler(hipGetLastError());
}
catch (std::runtime_error const& e) {
std::terminate();
}
};
EXPECT_DEATH(lambda_test_back(), "");
}
__global__ void TestSubspanDynamicKernel(Span<float> _span) {
_span.subspan(16, 0);
}
__global__ void TestSubspanStaticKernel(Span<float> _span) {
_span.subspan<16>();
}
TEST(GPUSpan, Subspan) {
auto lambda_subspan_dynamic = []() {
thrust::host_vector<float> h_vec(4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec(h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span(d_vec.data().get(), d_vec.size());
TestSubspanDynamicKernel << <1, 1 >> > (span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_dynamic(), "");
std::string output = testing::internal::GetCapturedStdout();
auto lambda_subspan_static = []() {
thrust::host_vector<float> h_vec(4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec(h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span(d_vec.data().get(), d_vec.size());
TestSubspanStaticKernel << <1, 1 >> > (span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_static(), "");
output = testing::internal::GetCapturedStdout();
}
}
| 6208b0b1bbae62f871eff07f4bdee4fc528b43a2.cu | #include "span.h"
#include "common.h"
#include "gtest/gtest.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#define SPAN_ASSERT_TRUE(cond, status) \
if (!(cond)) { \
*(status) = -1; \
}
#define SPAN_ASSERT_FALSE(cond, status) \
if ((cond)) { \
*(status) = -1; \
}
namespace common {
__global__ void TestFromOtherKernel(Span<float> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
}
// Test converting different T
__global__ void TestFromOtherKernelConst(Span<float const, 16> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
}
TEST(GPUSpan, FromOther) {
thrust::host_vector<float> h_vec(16);
std::iota(h_vec.begin(), h_vec.end(), 0);
thrust::device_vector<float> d_vec(h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
// dynamic extent
{
Span<float> span(d_vec.data().get(), d_vec.size());
TestFromOtherKernel << <1, 16 >> > (span);
}
{
Span<float> span(d_vec.data().get(), d_vec.size());
TestFromOtherKernelConst << <1, 16 >> > (span);
}
// static extent
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
TestFromOtherKernel << <1, 16 >> > (span);
}
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
TestFromOtherKernelConst << <1, 16 >> > (span);
}
}
/**/
struct TestStatus {
private:
int* status_;
public:
TestStatus() {
cuda_handler(cudaMalloc(&status_, sizeof(int)));
int h_status = 1;
cuda_handler(cudaMemcpy(status_, &h_status,
sizeof(int), cudaMemcpyHostToDevice));
}
~TestStatus() {
cuda_handler(cudaFree(status_));
}
int Get() {
int h_status;
cuda_handler(cudaMemcpy(&h_status, status_,
sizeof(int), cudaMemcpyDeviceToHost));
return h_status;
}
int* Data() {
return status_;
}
};
struct TestTestStatus {
int* status_;
TestTestStatus(int* _status) : status_(_status) {}
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
SPAN_ASSERT_TRUE(false, status_);
}
};
struct TestAssignment {
int* status_;
TestAssignment(int* _status) : status_(_status) {}
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
Span<float> s1;
float arr[] = { 3, 4, 5 };
Span<const float> s2 = arr;
SPAN_ASSERT_TRUE(s2.size() == 3, status_);
SPAN_ASSERT_TRUE(s2.data() == &arr[0], status_);
s2 = s1;
SPAN_ASSERT_TRUE(s2.empty(), status_);
}
};
TEST(GPUSpan, Assignment) {
cuda_handler(cudaSetDevice(0));
TestStatus status;
LaunchN(16, TestAssignment{ status.Data() });
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, TestStatus) {
cuda_handler(cudaSetDevice(0));
TestStatus status;
LaunchN(16, TestTestStatus{ status.Data() });
ASSERT_EQ(status.Get(), -1);
}
template <typename Iter>
XGBOOST_DEVICE void InitializeRange(Iter _begin, Iter _end) {
float j = 0;
for (Iter i = _begin; i != _end; ++i, ++j) {
*i = j;
}
}
__global__ void TestFrontKernel(Span<float> _span) {
_span.front();
}
__global__ void TestBackKernel(Span<float> _span) {
_span.back();
}
TEST(GPUSpan, FrontBack) {
cuda_handler(cudaSetDevice(0));
Span<float> s;
auto lambda_test_front = [=]() {
// make sure the termination happens inside this test.
try {
TestFrontKernel << <1, 1 >> > (s);
cuda_handler(cudaDeviceSynchronize());
cuda_handler(cudaGetLastError());
}
catch (std::runtime_error const& e) {
std::terminate();
}
};
EXPECT_DEATH(lambda_test_front(), "");
auto lambda_test_back = [=]() {
try {
TestBackKernel << <1, 1 >> > (s);
cuda_handler(cudaDeviceSynchronize());
cuda_handler(cudaGetLastError());
}
catch (std::runtime_error const& e) {
std::terminate();
}
};
EXPECT_DEATH(lambda_test_back(), "");
}
__global__ void TestSubspanDynamicKernel(Span<float> _span) {
_span.subspan(16, 0);
}
__global__ void TestSubspanStaticKernel(Span<float> _span) {
_span.subspan<16>();
}
TEST(GPUSpan, Subspan) {
auto lambda_subspan_dynamic = []() {
thrust::host_vector<float> h_vec(4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec(h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span(d_vec.data().get(), d_vec.size());
TestSubspanDynamicKernel << <1, 1 >> > (span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_dynamic(), "");
std::string output = testing::internal::GetCapturedStdout();
auto lambda_subspan_static = []() {
thrust::host_vector<float> h_vec(4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec(h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span(d_vec.data().get(), d_vec.size());
TestSubspanStaticKernel << <1, 1 >> > (span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_static(), "");
output = testing::internal::GetCapturedStdout();
}
}
|
c867cd4a3e27ac2956585885678f2c72734b06fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <limits>
namespace faiss { namespace gpu {
// For precomputed codes, this calculates and loads code distances
// into smem
template <typename LookupT, typename LookupVecT>
inline __device__ void
loadPrecomputedTerm(LookupT* smem,
LookupT* term2Start,
LookupT* term3Start,
int numCodes) {
constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
// We can only use vector loads if the data is guaranteed to be
// aligned. The codes are innermost, so if it is evenly divisible,
// then any slice will be aligned.
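  // (Editor's note) For the instantiations used later in this file, kWordSize
  // is 4 for LookupT = float / LookupVecT = float4 and, assuming Half8 packs
  // eight halfs, 8 for the half path, so the vectorized branch is taken
  // whenever numCodes is a multiple of that width.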
if (numCodes % kWordSize == 0) {
constexpr int kUnroll = 2;
// Load the data by float4 for efficiency, and then handle any remainder
// limitVec is the number of whole vec words we can load, in terms
// of whole blocks performing the load
int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
limitVec *= kUnroll * blockDim.x;
LookupVecT* smemV = (LookupVecT*) smem;
LookupVecT* term2StartV = (LookupVecT*) term2Start;
LookupVecT* term3StartV = (LookupVecT*) term3Start;
for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
LookupVecT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] =
LoadStore<LookupVecT>::load(&term2StartV[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LookupVecT q =
LoadStore<LookupVecT>::load(&term3StartV[i + j * blockDim.x]);
vals[j] = Math<LookupVecT>::add(vals[j], q);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
}
}
// This is where we start loading the remainder that does not evenly
// fit into kUnroll x blockDim.x
int remainder = limitVec * kWordSize;
for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
} else {
// Potential unaligned load
constexpr int kUnroll = 4;
int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
int i = threadIdx.x;
for (; i < limit; i += kUnroll * blockDim.x) {
LookupT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = term2Start[i + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = Math<LookupT>::add(vals[j], term3Start[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
smem[i + j * blockDim.x] = vals[j];
}
}
for (; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
}
}
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanPrecomputedMultiPass(Tensor<float, 2, true> queries,
Tensor<float, 2, true> precompTerm1,
Tensor<LookupT, 3, true> precompTerm2,
Tensor<LookupT, 3, true> precompTerm3,
Tensor<int, 2, true> topQueryToCentroid,
void** listCodes,
int* listLengths,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
// precomputed term 2 + 3 storage
// (sub q)(code id)
extern __shared__ char smemTerm23[];
LookupT* term23 = (LookupT*) smemTerm23;
// Each block handles a single query
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
auto codesPerSubQuantizer = precompTerm2.getSize(2);
auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
float* distanceOut = distance[outBase].data();
auto listId = topQueryToCentroid[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
unsigned char* codeList = (unsigned char*) listCodes[listId];
int limit = listLengths[listId];
constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
(NumSubQuantizers / 4);
unsigned int code32[kNumCode32];
unsigned int nextCode32[kNumCode32];
// We double-buffer the code loading, which improves memory utilization
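  // (Editor's note) code32 holds the codes scored in the current iteration
  // while nextCode32 is prefetched for the next one; the two are swapped at
  // the bottom of the scan loop below.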
if (threadIdx.x < limit) {
LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
}
// Load precomputed terms 1, 2, 3
float term1 = precompTerm1[queryId][probeId];
loadPrecomputedTerm<LookupT, LookupVecT>(term23,
precompTerm2[listId].data(),
precompTerm3[queryId].data(),
precompTermSize);
// Prevent WAR dependencies
__syncthreads();
// Each thread handles one code element in the list, with a
// block-wide stride
for (int codeIndex = threadIdx.x;
codeIndex < limit;
codeIndex += blockDim.x) {
// Prefetch next codes
if (codeIndex + blockDim.x < limit) {
LoadCode32<NumSubQuantizers>::load(
nextCode32, codeList, codeIndex + blockDim.x);
}
float dist = term1;
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
constexpr int kBytesPerCode32 =
NumSubQuantizers < 4 ? NumSubQuantizers : 4;
if (kBytesPerCode32 == 1) {
auto code = code32[0];
dist = ConvertTo<float>::to(term23[code]);
} else {
#pragma unroll
for (int byte = 0; byte < kBytesPerCode32; ++byte) {
auto code = getByte(code32[word], byte * 8, 8);
auto offset =
codesPerSubQuantizer * (word * kBytesPerCode32 + byte);
dist += ConvertTo<float>::to(term23[offset + code]);
}
}
}
// Write out intermediate distance result
// We do not maintain indices here, in order to reduce global
// memory traffic. Those are recovered in the final selection step.
distanceOut[codeIndex] = dist;
// Rotate buffers
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
code32[word] = nextCode32[word];
}
}
}
void
runMultiPassTile(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<int64_t, 2, true>& outIndices,
hipStream_t stream) {
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets,
thrustMem, stream);
// Convert all codes to a distance, and write out (distance,
// index) values for all intermediate results
{
auto kThreadsPerBlock = 256;
auto grid = dim3(topQueryToCentroid.getSize(1),
topQueryToCentroid.getSize(0));
auto block = dim3(kThreadsPerBlock);
// pq precomputed terms (2 + 3)
auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
smem *= numSubQuantizers * numSubQuantizerCodes;
FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \
do { \
auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>(); \
auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>(); \
\
hipLaunchKernelGGL(( pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>) \
, dim3(grid), dim3(block), smem, stream, \
queries, \
precompTerm1, \
precompTerm2T, \
precompTerm3T, \
topQueryToCentroid, \
listCodes.data().get(), \
listLengths.data().get(), \
prefixSumOffsets, \
allDistances); \
} while (0)
#define RUN_PQ(NUM_SUB_Q) \
do { \
if (useFloat16Lookup) { \
RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \
} else { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} \
} while (0)
switch (bytesPerCode) {
case 1:
RUN_PQ(1);
break;
case 2:
RUN_PQ(2);
break;
case 3:
RUN_PQ(3);
break;
case 4:
RUN_PQ(4);
break;
case 8:
RUN_PQ(8);
break;
case 12:
RUN_PQ(12);
break;
case 16:
RUN_PQ(16);
break;
case 20:
RUN_PQ(20);
break;
case 24:
RUN_PQ(24);
break;
case 28:
RUN_PQ(28);
break;
case 32:
RUN_PQ(32);
break;
case 40:
RUN_PQ(40);
break;
case 48:
RUN_PQ(48);
break;
case 56:
RUN_PQ(56);
break;
case 64:
RUN_PQ(64);
break;
case 96:
RUN_PQ(96);
break;
default:
FAISS_ASSERT(false);
break;
}
CUDA_TEST_ERROR();
#undef RUN_PQ
#undef RUN_PQ_OPT
}
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
topQueryToCentroid.getSize(1),
k,
false, // L2 distance chooses smallest
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
topQueryToCentroid,
k,
false, // L2 distance chooses smallest
outDistances,
outIndices,
stream);
CUDA_TEST_ERROR();
}
void runPQScanMultiPassPrecomputed(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<int64_t, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = topQueryToCentroid.getSize(1);
auto& mem = res->getMemoryManagerCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true> thrustMem2(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
int64_t sizeAvailable = mem.getSizeAvailable();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = ::min(nprobe, kNProbeSplit);
int64_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
int64_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
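  // (Editor's note, illustrative numbers only) e.g. nprobe = 32,
  // maxListLength = 10000, k = 100 gives pass2Chunks = 8,
  // sizeForFirstSelectPass = 8 * 100 * 8 = 6400 bytes and an allDistances
  // slab of 32 * 10000 * 4 = 1,280,000 bytes, so roughly 2.6 MB of scratch
  // is requested per query.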
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <=
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
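  // (Editor's note) the scan kernel reads *(prefixSumOffsets[q][p].data() - 1)
  // for the first (query, probe) pair, which lands exactly on this zeroed
  // element at offset 0 of the backing prefixSumOffsetSpace buffers.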
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true> allDistances2(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true> heapDistances2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true> heapIndices2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto coarseIndicesView =
topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto term1View =
precompTerm1.narrowOutermost(query, numQueriesInTile);
auto term3View =
precompTerm3.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runMultiPassTile(queryView,
term1View,
precompTerm2,
term3View,
coarseIndicesView,
useFloat16Lookup,
bytesPerCode,
numSubQuantizers,
numSubQuantizerCodes,
listCodes,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
| c867cd4a3e27ac2956585885678f2c72734b06fa.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <limits>
namespace faiss { namespace gpu {
// For precomputed codes, this calculates and loads code distances
// into smem
template <typename LookupT, typename LookupVecT>
inline __device__ void
loadPrecomputedTerm(LookupT* smem,
LookupT* term2Start,
LookupT* term3Start,
int numCodes) {
constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
// We can only use vector loads if the data is guaranteed to be
// aligned. The codes are innermost, so if it is evenly divisible,
// then any slice will be aligned.
if (numCodes % kWordSize == 0) {
constexpr int kUnroll = 2;
// Load the data by float4 for efficiency, and then handle any remainder
// limitVec is the number of whole vec words we can load, in terms
// of whole blocks performing the load
int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
limitVec *= kUnroll * blockDim.x;
LookupVecT* smemV = (LookupVecT*) smem;
LookupVecT* term2StartV = (LookupVecT*) term2Start;
LookupVecT* term3StartV = (LookupVecT*) term3Start;
for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
LookupVecT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] =
LoadStore<LookupVecT>::load(&term2StartV[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LookupVecT q =
LoadStore<LookupVecT>::load(&term3StartV[i + j * blockDim.x]);
vals[j] = Math<LookupVecT>::add(vals[j], q);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
}
}
// This is where we start loading the remainder that does not evenly
// fit into kUnroll x blockDim.x
int remainder = limitVec * kWordSize;
for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
} else {
// Potential unaligned load
constexpr int kUnroll = 4;
int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
int i = threadIdx.x;
for (; i < limit; i += kUnroll * blockDim.x) {
LookupT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = term2Start[i + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = Math<LookupT>::add(vals[j], term3Start[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
smem[i + j * blockDim.x] = vals[j];
}
}
for (; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
}
}
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanPrecomputedMultiPass(Tensor<float, 2, true> queries,
Tensor<float, 2, true> precompTerm1,
Tensor<LookupT, 3, true> precompTerm2,
Tensor<LookupT, 3, true> precompTerm3,
Tensor<int, 2, true> topQueryToCentroid,
void** listCodes,
int* listLengths,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
// precomputed term 2 + 3 storage
// (sub q)(code id)
extern __shared__ char smemTerm23[];
LookupT* term23 = (LookupT*) smemTerm23;
// Each block handles a single query
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
auto codesPerSubQuantizer = precompTerm2.getSize(2);
auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
float* distanceOut = distance[outBase].data();
auto listId = topQueryToCentroid[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
unsigned char* codeList = (unsigned char*) listCodes[listId];
int limit = listLengths[listId];
constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
(NumSubQuantizers / 4);
unsigned int code32[kNumCode32];
unsigned int nextCode32[kNumCode32];
// We double-buffer the code loading, which improves memory utilization
if (threadIdx.x < limit) {
LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
}
// Load precomputed terms 1, 2, 3
float term1 = precompTerm1[queryId][probeId];
loadPrecomputedTerm<LookupT, LookupVecT>(term23,
precompTerm2[listId].data(),
precompTerm3[queryId].data(),
precompTermSize);
// Prevent WAR dependencies
__syncthreads();
// Each thread handles one code element in the list, with a
// block-wide stride
for (int codeIndex = threadIdx.x;
codeIndex < limit;
codeIndex += blockDim.x) {
// Prefetch next codes
if (codeIndex + blockDim.x < limit) {
LoadCode32<NumSubQuantizers>::load(
nextCode32, codeList, codeIndex + blockDim.x);
}
float dist = term1;
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
constexpr int kBytesPerCode32 =
NumSubQuantizers < 4 ? NumSubQuantizers : 4;
if (kBytesPerCode32 == 1) {
auto code = code32[0];
dist = ConvertTo<float>::to(term23[code]);
} else {
#pragma unroll
for (int byte = 0; byte < kBytesPerCode32; ++byte) {
auto code = getByte(code32[word], byte * 8, 8);
auto offset =
codesPerSubQuantizer * (word * kBytesPerCode32 + byte);
dist += ConvertTo<float>::to(term23[offset + code]);
}
}
}
// Write out intermediate distance result
// We do not maintain indices here, in order to reduce global
// memory traffic. Those are recovered in the final selection step.
distanceOut[codeIndex] = dist;
// Rotate buffers
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
code32[word] = nextCode32[word];
}
}
}
void
runMultiPassTile(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<int64_t, 2, true>& outIndices,
cudaStream_t stream) {
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets,
thrustMem, stream);
// Convert all codes to a distance, and write out (distance,
// index) values for all intermediate results
{
auto kThreadsPerBlock = 256;
auto grid = dim3(topQueryToCentroid.getSize(1),
topQueryToCentroid.getSize(0));
auto block = dim3(kThreadsPerBlock);
// pq precomputed terms (2 + 3)
auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
smem *= numSubQuantizers * numSubQuantizerCodes;
FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \
do { \
auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>(); \
auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>(); \
\
pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T> \
<<<grid, block, smem, stream>>>( \
queries, \
precompTerm1, \
precompTerm2T, \
precompTerm3T, \
topQueryToCentroid, \
listCodes.data().get(), \
listLengths.data().get(), \
prefixSumOffsets, \
allDistances); \
} while (0)
#define RUN_PQ(NUM_SUB_Q) \
do { \
if (useFloat16Lookup) { \
RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \
} else { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} \
} while (0)
switch (bytesPerCode) {
case 1:
RUN_PQ(1);
break;
case 2:
RUN_PQ(2);
break;
case 3:
RUN_PQ(3);
break;
case 4:
RUN_PQ(4);
break;
case 8:
RUN_PQ(8);
break;
case 12:
RUN_PQ(12);
break;
case 16:
RUN_PQ(16);
break;
case 20:
RUN_PQ(20);
break;
case 24:
RUN_PQ(24);
break;
case 28:
RUN_PQ(28);
break;
case 32:
RUN_PQ(32);
break;
case 40:
RUN_PQ(40);
break;
case 48:
RUN_PQ(48);
break;
case 56:
RUN_PQ(56);
break;
case 64:
RUN_PQ(64);
break;
case 96:
RUN_PQ(96);
break;
default:
FAISS_ASSERT(false);
break;
}
CUDA_TEST_ERROR();
#undef RUN_PQ
#undef RUN_PQ_OPT
}
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
topQueryToCentroid.getSize(1),
k,
false, // L2 distance chooses smallest
heapDistances,
heapIndices,
stream);
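// Added illustration (not from the original source): with nprobe = 32 and
// kNProbeSplit = 8 (set in the caller), pass 1 k-selects each query's concatenated
// per-list distances in 8 chunks, yielding 8 * k candidates per query; pass 2 below
// then merges those candidates into the final k results.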
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
topQueryToCentroid,
k,
false, // L2 distance chooses smallest
outDistances,
outIndices,
stream);
CUDA_TEST_ERROR();
}
void runPQScanMultiPassPrecomputed(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<int64_t, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = topQueryToCentroid.getSize(1);
auto& mem = res->getMemoryManagerCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true> thrustMem2(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
int64_t sizeAvailable = mem.getSizeAvailable();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = std::min(nprobe, kNProbeSplit);
int64_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
int64_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <=
std::numeric_limits<int>::max());
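// Added worked example (illustrative numbers only, not from the original source):
// with nprobe = 32, k = 100 and maxListLength = 10,000, per query and per stream
// prefixSumOffsets takes 32 * 4 + 4 = 132 bytes, allDistances 32 * 10,000 * 4 =
// 1,280,000 bytes and the first select pass 8 * 100 * (4 + 4) = 6,400 bytes, so
// sizePerQuery = 2 * 1,286,532 bytes ~= 2.45 MiB, and roughly 315 MiB of scratch
// space is needed to reach the maximum tile of kMaxQueryTileSize = 128 queries.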
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true> allDistances2(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true> heapDistances2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true> heapIndices2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
std::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto coarseIndicesView =
topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto term1View =
precompTerm1.narrowOutermost(query, numQueriesInTile);
auto term3View =
precompTerm3.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runMultiPassTile(queryView,
term1View,
precompTerm2,
term3View,
coarseIndicesView,
useFloat16Lookup,
bytesPerCode,
numSubQuantizers,
numSubQuantizerCodes,
listCodes,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
|
bb4a5c70a48fffbd94b0b7b1b5ea778af28822cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdexcept>
#include "instanceNormalization3DPlugin.h"
using namespace nvinfer1;
using nvinfer1::plugin::InstanceNormalization3DPlugin;
using nvinfer1::plugin::InstanceNormalization3DPluginCreator;
#define CHECK_CUDA(call) \
do \
{ \
hipError_t status = call; \
if (status != hipSuccess) \
{ \
return status; \
} \
} while (0)
#define CHECK_CUDNN(call) \
do \
{ \
cudnnStatus_t status = call; \
if (status != CUDNN_STATUS_SUCCESS) \
{ \
return status; \
} \
} while (0)
template<typename T, int THREADS_PER_CTA>
__global__ __launch_bounds__(THREADS_PER_CTA)
void in3d_relu_activation(T* __restrict dst, T* __restrict src, float alpha, int count)
{
int idx = blockIdx.x * THREADS_PER_CTA + threadIdx.x;
if (idx >= count) return;
float val = src[idx];
dst[idx] = (val < 0.f)? val * alpha : val;
}
// This is derived from: https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
inline float half_to_float_fast(unsigned short value)
{
union F32
{
unsigned int u;
float f;
};
static const F32 magic = {(254 - 15) << 23};
static const F32 was_infnan = {(127 + 16) << 23};
F32 result;
result.u = (value & 0x7fff) << 13; // exponent/mantissa bits
result.f *= magic.f; // exponent adjust
if (result.f >= was_infnan.f)
{ // make sure Inf/NaN survive
result.u |= 255 << 23;
}
result.u |= (value & 0x8000) << 16; // sign bit
return result.f;
}
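// Added worked example (not part of the original source): for the half bit pattern
// 0x3C00, which encodes 1.0, (0x3C00 & 0x7fff) << 13 reinterpreted as a float is
// 2^-112; multiplying by magic (2^112) rescales the biased exponent, the Inf/NaN and
// sign fix-ups do not apply, and the function returns exactly 1.0f.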
cudnnStatus_t convert_trt2cudnn_dtype(nvinfer1::DataType trt_dtype, cudnnDataType_t* cudnn_dtype)
{
switch (trt_dtype)
{
case nvinfer1::DataType::kFLOAT: *cudnn_dtype = CUDNN_DATA_FLOAT; break;
case nvinfer1::DataType::kHALF: *cudnn_dtype = CUDNN_DATA_HALF; break;
default: return CUDNN_STATUS_BAD_PARAM;
}
return CUDNN_STATUS_SUCCESS;
}
namespace {
const char* INSTNORM3D_PLUGIN_VERSION{"1"};
const char* INSTNORM3D_PLUGIN_NAME{"INSTNORM3D_TRT"};
}
REGISTER_TENSORRT_PLUGIN(InstanceNormalization3DPluginCreator);
PluginFieldCollection InstanceNormalization3DPluginCreator::mFC{};
std::vector<PluginField> InstanceNormalization3DPluginCreator::mPluginAttributes;
InstanceNormalization3DPlugin::InstanceNormalization3DPlugin(
float epsilon, const std::vector<float>& scale, const std::vector<float>& bias, int relu, float alpha)
: _epsilon(epsilon)
, _nchan(scale.size())
, _h_scale(scale)
, _h_bias(bias)
, _relu(relu)
, _alpha(alpha)
, _in_scale(-1.f)
, _out_scale(-1.f)
{
ASSERT(scale.size() == bias.size());
}
InstanceNormalization3DPlugin::InstanceNormalization3DPlugin(
float epsilon, nvinfer1::Weights const& scale, nvinfer1::Weights const& bias, int relu, float alpha)
: _epsilon(epsilon)
, _nchan(scale.count)
, _relu(relu)
, _alpha(alpha)
, _in_scale(-1.f)
, _out_scale(-1.f)
{
ASSERT(scale.count == bias.count);
if (scale.type == nvinfer1::DataType::kFLOAT)
{
_h_scale.assign((float*) scale.values, (float*) scale.values + scale.count);
}
else if (scale.type == nvinfer1::DataType::kHALF)
{
_h_scale.reserve(_nchan);
for (int c = 0; c < _nchan; ++c)
{
unsigned short value = ((unsigned short*) scale.values)[c];
_h_scale.push_back(half_to_float_fast(value));
}
}
else
{
throw std::runtime_error("Unsupported scale dtype");
}
if (bias.type == nvinfer1::DataType::kFLOAT)
{
_h_bias.assign((float*) bias.values, (float*) bias.values + bias.count);
}
else if (bias.type == nvinfer1::DataType::kHALF)
{
_h_bias.reserve(_nchan);
for (int c = 0; c < _nchan; ++c)
{
unsigned short value = ((unsigned short*) bias.values)[c];
_h_bias.push_back(half_to_float_fast(value));
}
}
else
{
throw std::runtime_error("Unsupported bias dtype");
}
}
InstanceNormalization3DPlugin::InstanceNormalization3DPlugin(void const* serialData, size_t serialLength)
{
deserialize_value(&serialData, &serialLength, &_epsilon);
deserialize_value(&serialData, &serialLength, &_nchan);
deserialize_value(&serialData, &serialLength, &_h_scale);
deserialize_value(&serialData, &serialLength, &_h_bias);
deserialize_value(&serialData, &serialLength, &_relu);
deserialize_value(&serialData, &serialLength, &_alpha);
deserialize_value(&serialData, &serialLength, &_in_scale);
deserialize_value(&serialData, &serialLength, &_out_scale);
}
InstanceNormalization3DPlugin::~InstanceNormalization3DPlugin()
{
terminate();
}
// InstanceNormalization3DPlugin returns one output.
int InstanceNormalization3DPlugin::getNbOutputs() const
{
return 1;
}
DimsExprs InstanceNormalization3DPlugin::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder)
{
nvinfer1::DimsExprs output(inputs[0]);
return output;
}
int InstanceNormalization3DPlugin::initialize()
{
if (!initialized)
{
CHECK_CUDNN(cudnnCreate(&_cudnn_handle));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&_b_desc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&_x_desc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&_y_desc));
// NDHWC path
// Device info.
int device;
CHECK_CUDA(hipGetDevice(&device));
hipDeviceProp_t props;
CHECK_CUDA(hipGetDeviceProperties(&props, device));
_context.sm_count = props.multiProcessorCount;
_context.sm_shared_size = props.sharedMemPerMultiprocessor;
_context.sm_version = props.major * 100 + props.minor * 10;
memset(&_params, 0, sizeof(_params));
CHECK_CUDA(hipMalloc(&_d_scale, _nchan*sizeof(float)));
CHECK_CUDA(hipMalloc(&_d_bias, _nchan*sizeof(float)));
CHECK_CUDA(hipMemcpy(_d_scale, &_h_scale[0], _nchan*sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(_d_bias, &_h_bias[0], _nchan*sizeof(float), hipMemcpyHostToDevice));
}
initialized = true;
return 0;
}
void InstanceNormalization3DPlugin::terminate()
{
if (initialized)
{
cudnnDestroyTensorDescriptor(_y_desc);
cudnnDestroyTensorDescriptor(_x_desc);
cudnnDestroyTensorDescriptor(_b_desc);
cudnnDestroy(_cudnn_handle);
hipFree(_d_bias);
hipFree(_d_scale);
}
initialized = false;
return;
}
size_t InstanceNormalization3DPlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const
{
if (inputs[0].format == nvinfer1::PluginFormat::kLINEAR)
{
nvinfer1::Dims input_dims = inputs[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
size_t nchan_bytes = c * sizeof(float);
size_t scale_size = n * nchan_bytes;
size_t bias_size = n * nchan_bytes;
size_t total_wss = scale_size + bias_size;
return total_wss;
}
else if (inputs[0].format == nvinfer1::PluginFormat::kDHWC8 ||
inputs[0].format == nvinfer1::PluginFormat::kCDHW32)
{
int input_data_type = (inputs[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
int output_data_type = (outputs[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
nvinfer1::Dims input_dims = inputs[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int d = input_dims.d[2];
int h = input_dims.d[3];
int w = input_dims.d[4];
InstanceNormFwdParams params;
// only these parameters are required for workspace computation
params.nhw = d*h*w;
params.c = c;
params.n = n;
// Reserve memory for the workspaces.
size_t size_sums, size_counts, size_retired_ctas;
instance_norm_buffer_sizes_dispatch(_context, params, size_sums, size_counts, size_retired_ctas,
input_data_type, output_data_type);
size_t size_nc = n*c*sizeof(float);
size_nc = ((size_nc + 256 - 1) / 256) * 256;
return size_sums + size_counts + size_retired_ctas + 4 * size_nc;
}
else
{
ASSERT(0);
}
}
int InstanceNormalization3DPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
hipStream_t stream)
{
ASSERT(initialized);
if (inputDesc[0].format == nvinfer1::PluginFormat::kLINEAR)
{
CHECK_CUDNN(cudnnSetStream(_cudnn_handle, stream));
nvinfer1::Dims input_dims = inputDesc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int d = input_dims.d[2];
int h = input_dims.d[3];
int w = input_dims.d[4];
size_t nchan_bytes = c * sizeof(float);
// Note: We repeat the data for each batch entry so that we can do the full
// computation in a single CUDNN call in enqueue().
float* _d_array = (float*)workspace;
float* d_scale = &_d_array[0];
float* d_bias = &_d_array[n*c];
for (int i = 0; i < n; ++i)
{
CHECK_CUDA(hipMemcpyAsync(d_scale + i * c, _d_scale, nchan_bytes, hipMemcpyDeviceToDevice, stream));
CHECK_CUDA(hipMemcpyAsync(d_bias + i * c, _d_bias, nchan_bytes, hipMemcpyDeviceToDevice, stream));
}
int nc_dimA[] = {1, n*c, 1, 1, 1};
int nc_strideA[] = {nc_dimA[1]*nc_dimA[2]*nc_dimA[3]*nc_dimA[4],\
nc_dimA[2]*nc_dimA[3]*nc_dimA[4],\
nc_dimA[3]*nc_dimA[4],\
nc_dimA[4],\
1};
int img_dimA[] = {1, n*c, d, h, w};
int img_strideA[] = {img_dimA[1]*img_dimA[2]*img_dimA[3]*img_dimA[4],\
img_dimA[2]*img_dimA[3]*img_dimA[4],\
img_dimA[3]*img_dimA[4],\
img_dimA[4],\
1};
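// Added illustration (not from the original source): with n = 2 and c = 32 the
// descriptors below present the data as a single [1, 64, d, h, w] tensor, so
// per-instance normalization over (n, c) falls out of one batch-norm call that
// treats every (batch, channel) pair as its own channel.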
CHECK_CUDNN(cudnnSetTensorNdDescriptor(_b_desc, CUDNN_DATA_FLOAT, 5, nc_dimA, nc_strideA));
cudnnDataType_t cudnn_dtype;
CHECK_CUDNN(convert_trt2cudnn_dtype(inputDesc[0].type, &cudnn_dtype));
CHECK_CUDNN(cudnnSetTensorNdDescriptor(_x_desc, cudnn_dtype, 5, img_dimA, img_strideA));
CHECK_CUDNN(cudnnSetTensorNdDescriptor(_y_desc, cudnn_dtype, 5, img_dimA, img_strideA));
float alpha = 1;
float beta = 0;
//hipStreamSynchronize(stream);
void const* x_ptr = inputs[0];
void* y_ptr = outputs[0];
// Note: Use of CUDNN_BATCHNORM_SPATIAL_PERSISTENT can cause numerical
// overflows (NaNs) for fp32 data in some circumstances. The lower-
// performance CUDNN_BATCHNORM_SPATIAL should be used if this is not
// acceptable.
CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(_cudnn_handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, &alpha, &beta,
_x_desc, x_ptr, _y_desc, y_ptr, _b_desc, d_scale, d_bias, 1., nullptr, nullptr, _epsilon, nullptr, nullptr));
if (_relu > 0)
{
int count = n * c * d * h * w;
const int BLOCK_SZ = 256;
if (inputDesc[0].type == nvinfer1::DataType::kFLOAT)
{
hipLaunchKernelGGL(( in3d_relu_activation<float, BLOCK_SZ>), dim3((count + BLOCK_SZ - 1) / BLOCK_SZ), dim3(BLOCK_SZ), 0, stream, (float *)y_ptr, (float *)y_ptr, _alpha, count);
} else if (inputDesc[0].type == nvinfer1::DataType::kHALF)
{
hipLaunchKernelGGL(( in3d_relu_activation<__half, BLOCK_SZ>), dim3((count + BLOCK_SZ - 1) / BLOCK_SZ), dim3(BLOCK_SZ), 0, stream, (__half *)y_ptr, (__half *)y_ptr, _alpha, count);
}
else
{
ASSERT(0);
}
}
}
else if (inputDesc[0].format == nvinfer1::PluginFormat::kDHWC8 ||
inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32)
{
int input_data_type = (inputDesc[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
int output_data_type = (outputDesc[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
nvinfer1::Dims input_dims = inputDesc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int d = input_dims.d[2];
int h = input_dims.d[3];
int w = input_dims.d[4];
_params.nhw = d*h*w;
_params.c = c;
_params.n = n;
size_t size_sums, size_counts, size_retired_ctas;
instance_norm_buffer_sizes_dispatch(_context, _params, size_sums, size_counts, size_retired_ctas,
input_data_type, output_data_type);
size_t size_nc = n*c*sizeof(float);
size_nc = ((size_nc + 256 - 1) / 256) * 256;
char* d_buf = reinterpret_cast<char *>(workspace);
_params.gmem_sums = reinterpret_cast<GMEM_SUMS_TYPE *>(d_buf); d_buf += size_sums;
_params.gmem_counts = reinterpret_cast<int *>(d_buf); d_buf += size_counts;
_params.gmem_retired_ctas = reinterpret_cast<int *>(d_buf); d_buf += size_retired_ctas;
_params.gmem_running_mean = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_running_var = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_saved_mean = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_saved_var = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_src = const_cast<void *>(inputs[0]);
_params.gmem_dst = outputs[0];
_params.gmem_bias = _d_bias;
_params.gmem_scale = _d_scale;
_params.var_eps = _epsilon;
_params.exp_avg_factor = 1.f; //(float)exp_avg_factor;
_params.use_relu = _relu; //use_relu;
_params.relu_alpha = _alpha; //relu_alpha;
_params.in_scale = _in_scale;
_params.out_scale = 1.f / _out_scale;
int loop = instance_norm_fwd_dispatch(_context, _params, stream, input_data_type, output_data_type);
}
else
{
ASSERT(false && "Unexpected input format");
}
return 0;
}
size_t InstanceNormalization3DPlugin::getSerializationSize() const
{
return (serialized_size(_epsilon) +
serialized_size(_nchan) +
serialized_size(_h_scale) +
serialized_size(_h_bias)) +
serialized_size(_relu) +
serialized_size(_alpha) +
serialized_size(_in_scale) +
serialized_size(_out_scale);
}
void InstanceNormalization3DPlugin::serialize(void *buffer) const
{
serialize_value(&buffer, _epsilon);
serialize_value(&buffer, _nchan);
serialize_value(&buffer, _h_scale);
serialize_value(&buffer, _h_bias);
serialize_value(&buffer, _relu);
serialize_value(&buffer, _alpha);
serialize_value(&buffer, _in_scale);
serialize_value(&buffer, _out_scale);
}
bool InstanceNormalization3DPlugin::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
ASSERT(inOut && pos < (nbInputs + nbOutputs));
bool support_fp32_linear = (inOut[pos].type == nvinfer1::DataType::kFLOAT
&& inOut[pos].format == nvinfer1::PluginFormat::kLINEAR
&& inOut[pos].type == inOut[0].type
&& inOut[pos].format == inOut[0].format);
bool support_fp16_dhwc8 = (inOut[pos].type == nvinfer1::DataType::kHALF
&& inOut[pos].format == nvinfer1::PluginFormat::kDHWC8
&& inOut[pos].type == inOut[0].type
&& inOut[pos].format == inOut[0].format);
bool support_int8_cdhw32 = (inOut[pos].type == nvinfer1::DataType::kINT8
&& inOut[pos].format == nvinfer1::PluginFormat::kCDHW32
&& inOut[pos].type == inOut[0].type
&& inOut[pos].format == inOut[0].format);
ASSERT(pos == 0 || pos == 1);
return support_fp32_linear || support_fp16_dhwc8 || support_int8_cdhw32;
}
const char* InstanceNormalization3DPlugin::getPluginType() const
{
return INSTNORM3D_PLUGIN_NAME;
}
const char* InstanceNormalization3DPlugin::getPluginVersion() const
{
return INSTNORM3D_PLUGIN_VERSION;
}
void InstanceNormalization3DPlugin::destroy()
{
delete this;
}
IPluginV2DynamicExt* InstanceNormalization3DPlugin::clone() const
{
auto plugin = new InstanceNormalization3DPlugin{_epsilon, _h_scale, _h_bias, _relu, _alpha};
plugin->setPluginNamespace(mPluginNamespace);
plugin->initialize();
return plugin;
}
// Set plugin namespace
void InstanceNormalization3DPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* InstanceNormalization3DPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
nvinfer1::DataType InstanceNormalization3DPlugin::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
ASSERT(inputTypes && nbInputs > 0 && index == 0);
return nvinfer1::DataType::kFLOAT;
}
void InstanceNormalization3DPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs)
{
_in_scale = in[0].desc.scale;
_out_scale = out[0].desc.scale;
}
// InstanceNormalization3DPluginCreator methods
InstanceNormalization3DPluginCreator::InstanceNormalization3DPluginCreator()
{
mPluginAttributes.emplace_back(PluginField("epsilon", nullptr, PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(PluginField("scales", nullptr, PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(PluginField("bias", nullptr, PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(PluginField("relu", nullptr, PluginFieldType::kINT32, 1));
mPluginAttributes.emplace_back(PluginField("alpha", nullptr, PluginFieldType::kFLOAT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* InstanceNormalization3DPluginCreator::getPluginName() const
{
return INSTNORM3D_PLUGIN_NAME;
}
const char* InstanceNormalization3DPluginCreator::getPluginVersion() const
{
return INSTNORM3D_PLUGIN_VERSION;
}
const PluginFieldCollection* InstanceNormalization3DPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2DynamicExt* InstanceNormalization3DPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc)
{
std::vector<float> scaleValues;
std::vector<float> biasValues;
float epsilon {};
int relu {};
float alpha {};
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "epsilon"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
epsilon= *(static_cast<const float*>(fields[i].data));
}
else if (!strcmp(attrName, "scales"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
scaleValues.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
scaleValues.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "bias"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
biasValues.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
biasValues.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "relu"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
relu= *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "alpha"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
alpha= *(static_cast<const float*>(fields[i].data));
}
}
Weights scaleWeights{DataType::kFLOAT, scaleValues.data(), (int64_t) scaleValues.size()};
Weights biasWeights{DataType::kFLOAT, biasValues.data(), (int64_t) biasValues.size()};
InstanceNormalization3DPlugin* obj = new InstanceNormalization3DPlugin(epsilon, scaleWeights, biasWeights, relu, alpha);
obj->setPluginNamespace(mNamespace.c_str());
obj->initialize();
return obj;
}
IPluginV2DynamicExt* InstanceNormalization3DPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
InstanceNormalization3DPlugin* obj = new InstanceNormalization3DPlugin{serialData, serialLength};
obj->setPluginNamespace(mNamespace.c_str());
obj->initialize();
return obj;
}
| bb4a5c70a48fffbd94b0b7b1b5ea778af28822cf.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdexcept>
#include "instanceNormalization3DPlugin.h"
using namespace nvinfer1;
using nvinfer1::plugin::InstanceNormalization3DPlugin;
using nvinfer1::plugin::InstanceNormalization3DPluginCreator;
#define CHECK_CUDA(call) \
do \
{ \
cudaError_t status = call; \
if (status != cudaSuccess) \
{ \
return status; \
} \
} while (0)
#define CHECK_CUDNN(call) \
do \
{ \
cudnnStatus_t status = call; \
if (status != CUDNN_STATUS_SUCCESS) \
{ \
return status; \
} \
} while (0)
template<typename T, int THREADS_PER_CTA>
__global__ __launch_bounds__(THREADS_PER_CTA)
void in3d_relu_activation(T* __restrict dst, T* __restrict src, float alpha, int count)
{
int idx = blockIdx.x * THREADS_PER_CTA + threadIdx.x;
if (idx >= count) return;
float val = src[idx];
dst[idx] = (val < 0.f)? val * alpha : val;
}
// This is derived from: https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
inline float half_to_float_fast(unsigned short value)
{
union F32
{
unsigned int u;
float f;
};
static const F32 magic = {(254 - 15) << 23};
static const F32 was_infnan = {(127 + 16) << 23};
F32 result;
result.u = (value & 0x7fff) << 13; // exponent/mantissa bits
result.f *= magic.f; // exponent adjust
if (result.f >= was_infnan.f)
{ // make sure Inf/NaN survive
result.u |= 255 << 23;
}
result.u |= (value & 0x8000) << 16; // sign bit
return result.f;
}
cudnnStatus_t convert_trt2cudnn_dtype(nvinfer1::DataType trt_dtype, cudnnDataType_t* cudnn_dtype)
{
switch (trt_dtype)
{
case nvinfer1::DataType::kFLOAT: *cudnn_dtype = CUDNN_DATA_FLOAT; break;
case nvinfer1::DataType::kHALF: *cudnn_dtype = CUDNN_DATA_HALF; break;
default: return CUDNN_STATUS_BAD_PARAM;
}
return CUDNN_STATUS_SUCCESS;
}
namespace {
const char* INSTNORM3D_PLUGIN_VERSION{"1"};
const char* INSTNORM3D_PLUGIN_NAME{"INSTNORM3D_TRT"};
}
REGISTER_TENSORRT_PLUGIN(InstanceNormalization3DPluginCreator);
PluginFieldCollection InstanceNormalization3DPluginCreator::mFC{};
std::vector<PluginField> InstanceNormalization3DPluginCreator::mPluginAttributes;
InstanceNormalization3DPlugin::InstanceNormalization3DPlugin(
float epsilon, const std::vector<float>& scale, const std::vector<float>& bias, int relu, float alpha)
: _epsilon(epsilon)
, _nchan(scale.size())
, _h_scale(scale)
, _h_bias(bias)
, _relu(relu)
, _alpha(alpha)
, _in_scale(-1.f)
, _out_scale(-1.f)
{
ASSERT(scale.size() == bias.size());
}
InstanceNormalization3DPlugin::InstanceNormalization3DPlugin(
float epsilon, nvinfer1::Weights const& scale, nvinfer1::Weights const& bias, int relu, float alpha)
: _epsilon(epsilon)
, _nchan(scale.count)
, _relu(relu)
, _alpha(alpha)
, _in_scale(-1.f)
, _out_scale(-1.f)
{
ASSERT(scale.count == bias.count);
if (scale.type == nvinfer1::DataType::kFLOAT)
{
_h_scale.assign((float*) scale.values, (float*) scale.values + scale.count);
}
else if (scale.type == nvinfer1::DataType::kHALF)
{
_h_scale.reserve(_nchan);
for (int c = 0; c < _nchan; ++c)
{
unsigned short value = ((unsigned short*) scale.values)[c];
_h_scale.push_back(half_to_float_fast(value));
}
}
else
{
throw std::runtime_error("Unsupported scale dtype");
}
if (bias.type == nvinfer1::DataType::kFLOAT)
{
_h_bias.assign((float*) bias.values, (float*) bias.values + bias.count);
}
else if (bias.type == nvinfer1::DataType::kHALF)
{
_h_bias.reserve(_nchan);
for (int c = 0; c < _nchan; ++c)
{
unsigned short value = ((unsigned short*) bias.values)[c];
_h_bias.push_back(half_to_float_fast(value));
}
}
else
{
throw std::runtime_error("Unsupported bias dtype");
}
}
InstanceNormalization3DPlugin::InstanceNormalization3DPlugin(void const* serialData, size_t serialLength)
{
deserialize_value(&serialData, &serialLength, &_epsilon);
deserialize_value(&serialData, &serialLength, &_nchan);
deserialize_value(&serialData, &serialLength, &_h_scale);
deserialize_value(&serialData, &serialLength, &_h_bias);
deserialize_value(&serialData, &serialLength, &_relu);
deserialize_value(&serialData, &serialLength, &_alpha);
deserialize_value(&serialData, &serialLength, &_in_scale);
deserialize_value(&serialData, &serialLength, &_out_scale);
}
InstanceNormalization3DPlugin::~InstanceNormalization3DPlugin()
{
terminate();
}
// InstanceNormalization3DPlugin returns one output.
int InstanceNormalization3DPlugin::getNbOutputs() const
{
return 1;
}
DimsExprs InstanceNormalization3DPlugin::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder)
{
nvinfer1::DimsExprs output(inputs[0]);
return output;
}
int InstanceNormalization3DPlugin::initialize()
{
if (!initialized)
{
CHECK_CUDNN(cudnnCreate(&_cudnn_handle));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&_b_desc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&_x_desc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&_y_desc));
// NDHWC path
// Device info.
int device;
CHECK_CUDA(cudaGetDevice(&device));
cudaDeviceProp props;
CHECK_CUDA(cudaGetDeviceProperties(&props, device));
_context.sm_count = props.multiProcessorCount;
_context.sm_shared_size = props.sharedMemPerMultiprocessor;
_context.sm_version = props.major * 100 + props.minor * 10;
memset(&_params, 0, sizeof(_params));
CHECK_CUDA(cudaMalloc(&_d_scale, _nchan*sizeof(float)));
CHECK_CUDA(cudaMalloc(&_d_bias, _nchan*sizeof(float)));
CHECK_CUDA(cudaMemcpy(_d_scale, &_h_scale[0], _nchan*sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(_d_bias, &_h_bias[0], _nchan*sizeof(float), cudaMemcpyHostToDevice));
}
initialized = true;
return 0;
}
void InstanceNormalization3DPlugin::terminate()
{
if (initialized)
{
cudnnDestroyTensorDescriptor(_y_desc);
cudnnDestroyTensorDescriptor(_x_desc);
cudnnDestroyTensorDescriptor(_b_desc);
cudnnDestroy(_cudnn_handle);
cudaFree(_d_bias);
cudaFree(_d_scale);
}
initialized = false;
return;
}
size_t InstanceNormalization3DPlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const
{
if (inputs[0].format == nvinfer1::PluginFormat::kLINEAR)
{
nvinfer1::Dims input_dims = inputs[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
size_t nchan_bytes = c * sizeof(float);
size_t scale_size = n * nchan_bytes;
size_t bias_size = n * nchan_bytes;
size_t total_wss = scale_size + bias_size;
return total_wss;
}
else if (inputs[0].format == nvinfer1::PluginFormat::kDHWC8 ||
inputs[0].format == nvinfer1::PluginFormat::kCDHW32)
{
int input_data_type = (inputs[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
int output_data_type = (outputs[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
nvinfer1::Dims input_dims = inputs[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int d = input_dims.d[2];
int h = input_dims.d[3];
int w = input_dims.d[4];
InstanceNormFwdParams params;
// only these parameters are required for workspace computation
params.nhw = d*h*w;
params.c = c;
params.n = n;
// Reserve memory for the workspaces.
size_t size_sums, size_counts, size_retired_ctas;
instance_norm_buffer_sizes_dispatch(_context, params, size_sums, size_counts, size_retired_ctas,
input_data_type, output_data_type);
size_t size_nc = n*c*sizeof(float);
size_nc = ((size_nc + 256 - 1) / 256) * 256;
return size_sums + size_counts + size_retired_ctas + 4 * size_nc;
}
else
{
ASSERT(0);
}
}
int InstanceNormalization3DPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
cudaStream_t stream)
{
ASSERT(initialized);
if (inputDesc[0].format == nvinfer1::PluginFormat::kLINEAR)
{
CHECK_CUDNN(cudnnSetStream(_cudnn_handle, stream));
nvinfer1::Dims input_dims = inputDesc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int d = input_dims.d[2];
int h = input_dims.d[3];
int w = input_dims.d[4];
size_t nchan_bytes = c * sizeof(float);
// Note: We repeat the data for each batch entry so that we can do the full
// computation in a single CUDNN call in enqueue().
float* _d_array = (float*)workspace;
float* d_scale = &_d_array[0];
float* d_bias = &_d_array[n*c];
for (int i = 0; i < n; ++i)
{
CHECK_CUDA(cudaMemcpyAsync(d_scale + i * c, _d_scale, nchan_bytes, cudaMemcpyDeviceToDevice, stream));
CHECK_CUDA(cudaMemcpyAsync(d_bias + i * c, _d_bias, nchan_bytes, cudaMemcpyDeviceToDevice, stream));
}
int nc_dimA[] = {1, n*c, 1, 1, 1};
int nc_strideA[] = {nc_dimA[1]*nc_dimA[2]*nc_dimA[3]*nc_dimA[4],\
nc_dimA[2]*nc_dimA[3]*nc_dimA[4],\
nc_dimA[3]*nc_dimA[4],\
nc_dimA[4],\
1};
int img_dimA[] = {1, n*c, d, h, w};
int img_strideA[] = {img_dimA[1]*img_dimA[2]*img_dimA[3]*img_dimA[4],\
img_dimA[2]*img_dimA[3]*img_dimA[4],\
img_dimA[3]*img_dimA[4],\
img_dimA[4],\
1};
CHECK_CUDNN(cudnnSetTensorNdDescriptor(_b_desc, CUDNN_DATA_FLOAT, 5, nc_dimA, nc_strideA));
cudnnDataType_t cudnn_dtype;
CHECK_CUDNN(convert_trt2cudnn_dtype(inputDesc[0].type, &cudnn_dtype));
CHECK_CUDNN(cudnnSetTensorNdDescriptor(_x_desc, cudnn_dtype, 5, img_dimA, img_strideA));
CHECK_CUDNN(cudnnSetTensorNdDescriptor(_y_desc, cudnn_dtype, 5, img_dimA, img_strideA));
float alpha = 1;
float beta = 0;
//cudaStreamSynchronize(stream);
void const* x_ptr = inputs[0];
void* y_ptr = outputs[0];
// Note: Use of CUDNN_BATCHNORM_SPATIAL_PERSISTENT can cause numerical
// overflows (NaNs) for fp32 data in some circumstances. The lower-
// performance CUDNN_BATCHNORM_SPATIAL should be used if this is not
// acceptable.
CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(_cudnn_handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, &alpha, &beta,
_x_desc, x_ptr, _y_desc, y_ptr, _b_desc, d_scale, d_bias, 1., nullptr, nullptr, _epsilon, nullptr, nullptr));
if (_relu > 0)
{
int count = n * c * d * h * w;
const int BLOCK_SZ = 256;
if (inputDesc[0].type == nvinfer1::DataType::kFLOAT)
{
in3d_relu_activation<float, BLOCK_SZ><<<(count + BLOCK_SZ - 1) / BLOCK_SZ, BLOCK_SZ, 0, stream>>>((float *)y_ptr, (float *)y_ptr, _alpha, count);
} else if (inputDesc[0].type == nvinfer1::DataType::kHALF)
{
in3d_relu_activation<__half, BLOCK_SZ><<<(count + BLOCK_SZ - 1) / BLOCK_SZ, BLOCK_SZ, 0, stream>>>((__half *)y_ptr, (__half *)y_ptr, _alpha, count);
}
else
{
ASSERT(0);
}
}
}
else if (inputDesc[0].format == nvinfer1::PluginFormat::kDHWC8 ||
inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32)
{
int input_data_type = (inputDesc[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
int output_data_type = (outputDesc[0].type == nvinfer1::DataType::kHALF) ? 1 : 2;
nvinfer1::Dims input_dims = inputDesc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int d = input_dims.d[2];
int h = input_dims.d[3];
int w = input_dims.d[4];
_params.nhw = d*h*w;
_params.c = c;
_params.n = n;
size_t size_sums, size_counts, size_retired_ctas;
instance_norm_buffer_sizes_dispatch(_context, _params, size_sums, size_counts, size_retired_ctas,
input_data_type, output_data_type);
size_t size_nc = n*c*sizeof(float);
size_nc = ((size_nc + 256 - 1) / 256) * 256;
char* d_buf = reinterpret_cast<char *>(workspace);
_params.gmem_sums = reinterpret_cast<GMEM_SUMS_TYPE *>(d_buf); d_buf += size_sums;
_params.gmem_counts = reinterpret_cast<int *>(d_buf); d_buf += size_counts;
_params.gmem_retired_ctas = reinterpret_cast<int *>(d_buf); d_buf += size_retired_ctas;
_params.gmem_running_mean = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_running_var = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_saved_mean = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_saved_var = reinterpret_cast<float *>(d_buf); d_buf += size_nc;
_params.gmem_src = const_cast<void *>(inputs[0]);
_params.gmem_dst = outputs[0];
_params.gmem_bias = _d_bias;
_params.gmem_scale = _d_scale;
_params.var_eps = _epsilon;
_params.exp_avg_factor = 1.f; //(float)exp_avg_factor;
_params.use_relu = _relu; //use_relu;
_params.relu_alpha = _alpha; //relu_alpha;
_params.in_scale = _in_scale;
_params.out_scale = 1.f / _out_scale;
int loop = instance_norm_fwd_dispatch(_context, _params, stream, input_data_type, output_data_type);
}
else
{
ASSERT(false && "Unexpected input format");
}
return 0;
}
size_t InstanceNormalization3DPlugin::getSerializationSize() const
{
return (serialized_size(_epsilon) +
serialized_size(_nchan) +
serialized_size(_h_scale) +
serialized_size(_h_bias)) +
serialized_size(_relu) +
serialized_size(_alpha) +
serialized_size(_in_scale) +
serialized_size(_out_scale);
}
void InstanceNormalization3DPlugin::serialize(void *buffer) const
{
serialize_value(&buffer, _epsilon);
serialize_value(&buffer, _nchan);
serialize_value(&buffer, _h_scale);
serialize_value(&buffer, _h_bias);
serialize_value(&buffer, _relu);
serialize_value(&buffer, _alpha);
serialize_value(&buffer, _in_scale);
serialize_value(&buffer, _out_scale);
}
bool InstanceNormalization3DPlugin::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
ASSERT(inOut && pos < (nbInputs + nbOutputs));
bool support_fp32_linear = (inOut[pos].type == nvinfer1::DataType::kFLOAT
&& inOut[pos].format == nvinfer1::PluginFormat::kLINEAR
&& inOut[pos].type == inOut[0].type
&& inOut[pos].format == inOut[0].format);
bool support_fp16_dhwc8 = (inOut[pos].type == nvinfer1::DataType::kHALF
&& inOut[pos].format == nvinfer1::PluginFormat::kDHWC8
&& inOut[pos].type == inOut[0].type
&& inOut[pos].format == inOut[0].format);
bool support_int8_cdhw32 = (inOut[pos].type == nvinfer1::DataType::kINT8
&& inOut[pos].format == nvinfer1::PluginFormat::kCDHW32
&& inOut[pos].type == inOut[0].type
&& inOut[pos].format == inOut[0].format);
ASSERT(pos == 0 || pos == 1);
return support_fp32_linear || support_fp16_dhwc8 || support_int8_cdhw32;
}
const char* InstanceNormalization3DPlugin::getPluginType() const
{
return INSTNORM3D_PLUGIN_NAME;
}
const char* InstanceNormalization3DPlugin::getPluginVersion() const
{
return INSTNORM3D_PLUGIN_VERSION;
}
void InstanceNormalization3DPlugin::destroy()
{
delete this;
}
IPluginV2DynamicExt* InstanceNormalization3DPlugin::clone() const
{
auto plugin = new InstanceNormalization3DPlugin{_epsilon, _h_scale, _h_bias, _relu, _alpha};
plugin->setPluginNamespace(mPluginNamespace);
plugin->initialize();
return plugin;
}
// Set plugin namespace
void InstanceNormalization3DPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* InstanceNormalization3DPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
nvinfer1::DataType InstanceNormalization3DPlugin::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
ASSERT(inputTypes && nbInputs > 0 && index == 0);
return nvinfer1::DataType::kFLOAT;
}
void InstanceNormalization3DPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs)
{
_in_scale = in[0].desc.scale;
_out_scale = out[0].desc.scale;
}
// InstanceNormalization3DPluginCreator methods
InstanceNormalization3DPluginCreator::InstanceNormalization3DPluginCreator()
{
mPluginAttributes.emplace_back(PluginField("epsilon", nullptr, PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(PluginField("scales", nullptr, PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(PluginField("bias", nullptr, PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(PluginField("relu", nullptr, PluginFieldType::kINT32, 1));
mPluginAttributes.emplace_back(PluginField("alpha", nullptr, PluginFieldType::kFLOAT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* InstanceNormalization3DPluginCreator::getPluginName() const
{
return INSTNORM3D_PLUGIN_NAME;
}
const char* InstanceNormalization3DPluginCreator::getPluginVersion() const
{
return INSTNORM3D_PLUGIN_VERSION;
}
const PluginFieldCollection* InstanceNormalization3DPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2DynamicExt* InstanceNormalization3DPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc)
{
std::vector<float> scaleValues;
std::vector<float> biasValues;
float epsilon {};
int relu {};
float alpha {};
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "epsilon"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
epsilon= *(static_cast<const float*>(fields[i].data));
}
else if (!strcmp(attrName, "scales"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
scaleValues.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
scaleValues.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "bias"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
biasValues.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
biasValues.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "relu"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
relu= *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "alpha"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
alpha= *(static_cast<const float*>(fields[i].data));
}
}
Weights scaleWeights{DataType::kFLOAT, scaleValues.data(), (int64_t) scaleValues.size()};
Weights biasWeights{DataType::kFLOAT, biasValues.data(), (int64_t) biasValues.size()};
InstanceNormalization3DPlugin* obj = new InstanceNormalization3DPlugin(epsilon, scaleWeights, biasWeights, relu, alpha);
obj->setPluginNamespace(mNamespace.c_str());
obj->initialize();
return obj;
}
IPluginV2DynamicExt* InstanceNormalization3DPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
InstanceNormalization3DPlugin* obj = new InstanceNormalization3DPlugin{serialData, serialLength};
obj->setPluginNamespace(mNamespace.c_str());
obj->initialize();
return obj;
}
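// Hedged usage sketch (added for illustration; not part of the original plugin
// source). It shows one way to build this plugin through the TensorRT plugin
// registry; the function name, the channel count and the identity scale/bias values
// are example assumptions, and the surrounding network/builder code is omitted.
inline nvinfer1::IPluginV2* exampleCreateInstNorm3d(int nchan)
{
    std::vector<float> scale(nchan, 1.0f);
    std::vector<float> bias(nchan, 0.0f);
    float epsilon = 1e-5f;
    int relu = 0;
    float alpha = 0.0f;
    std::vector<nvinfer1::PluginField> fields;
    fields.emplace_back("epsilon", &epsilon, nvinfer1::PluginFieldType::kFLOAT32, 1);
    fields.emplace_back("scales", scale.data(), nvinfer1::PluginFieldType::kFLOAT32, nchan);
    fields.emplace_back("bias", bias.data(), nvinfer1::PluginFieldType::kFLOAT32, nchan);
    fields.emplace_back("relu", &relu, nvinfer1::PluginFieldType::kINT32, 1);
    fields.emplace_back("alpha", &alpha, nvinfer1::PluginFieldType::kFLOAT32, 1);
    nvinfer1::PluginFieldCollection fc;
    fc.nbFields = static_cast<int>(fields.size());
    fc.fields = fields.data();
    auto* creator = getPluginRegistry()->getPluginCreator(INSTNORM3D_PLUGIN_NAME, INSTNORM3D_PLUGIN_VERSION);
    return creator ? creator->createPlugin("instnorm3d_example", &fc) : nullptr;
}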
|
c62e98cfd725c469b35323cbb7ac70bb92736a31.hip | // !!! This is a file automatically generated by hipify!!!
#include "imageFilteringGpu.cuh"
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/cudev.hpp>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
texture<uchar, hipTextureType2D, hipReadModeElementType> srcTex(false, hipFilterModePoint, hipAddressModeClamp);
__global__ void imageFilteringGpu
(
const cv::cudev::PtrStepSz<uchar> src,
cv::cudev::PtrStepSz<uchar> dst,
const cv::cudev::PtrStepSz<float> kernel,
const int border_size
)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y >= border_size) && y < (dst.rows-border_size)){
if((x >= border_size) && (x < (dst.cols-border_size))){
float sum = 0.0f;
for(int yy = 0; yy < kernel.rows; yy++){
for(int xx = 0; xx < kernel.cols; xx++){
sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], src.ptr(y+yy-border_size)[x+xx-border_size]));
}
}
dst.ptr(y)[x] = sum;
}
}
}
// use __ldg
__global__ void imageFilteringGpu_ldg
(
const cv::cudev::PtrStepSz<uchar> src,
cv::cudev::PtrStepSz<uchar> dst,
const cv::cudev::PtrStepSz<float> kernel,
const int border_size
)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y >= border_size) && y < (dst.rows-border_size)){
if((x >= border_size) && (x < (dst.cols-border_size))){
float sum = 0.0f;
for(int yy = 0; yy < kernel.rows; yy++){
const uchar* psrc = src.ptr(y+yy-border_size) + (x-border_size);
const float* pkernel = kernel.ptr(yy);
for(int xx = 0; xx < kernel.cols; xx++){
sum = __fadd_rn(sum, __fmul_rn(__ldg(&pkernel[xx]), __ldg(&psrc[xx])));
}
}
dst.ptr(y)[x] = sum;
}
}
}
// use texture
__global__ void imageFilteringGpu_tex
(
const cv::cudev::PtrStepSz<uchar> src,
cv::cudev::PtrStepSz<uchar> dst,
const cv::cudev::PtrStepSz<float> kernel,
const int border_size
)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y >= border_size) && (y < (dst.rows-border_size))){
if((x >= border_size) && (x < (dst.cols-border_size))){
float sum = 0.0f;
for(int yy = 0; yy < kernel.rows; yy++){
for(int xx = 0; xx < kernel.cols; xx++){
sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], tex2D(srcTex, x + xx - border_size, y + yy - border_size)));
}
}
dst.ptr(y)[x] = sum;
}
}
}
void launchImageFilteringGpu
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size
)
{
cv::cudev::PtrStepSz<uchar> pSrc =
cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step);
cv::cudev::PtrStepSz<uchar> pDst =
cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step);
cv::cudev::PtrStepSz<float> pKernel =
cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step);
const dim3 block(64, 2);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y));
hipLaunchKernelGGL(( imageFilteringGpu), dim3(grid), dim3(block), 0, 0, pSrc, pDst, pKernel, border_size);
CV_CUDEV_SAFE_CALL(hipGetLastError());
CV_CUDEV_SAFE_CALL(hipDeviceSynchronize());
}
// use __ldg
void launchImageFilteringGpu_ldg
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size
)
{
cv::cudev::PtrStepSz<uchar> pSrc =
cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step);
cv::cudev::PtrStepSz<uchar> pDst =
cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step);
cv::cudev::PtrStepSz<float> pKernel =
cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step);
const dim3 block(64, 2);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y));
hipLaunchKernelGGL(( imageFilteringGpu_ldg), dim3(grid), dim3(block), 0, 0, pSrc, pDst, pKernel, border_size);
CV_CUDEV_SAFE_CALL(hipGetLastError());
CV_CUDEV_SAFE_CALL(hipDeviceSynchronize());
}
// use texture
void launchImageFilteringGpu_tex
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size
)
{
cv::cudev::PtrStepSz<uchar> pSrc =
cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step);
cv::cudev::PtrStepSz<uchar> pDst =
cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step);
cv::cudev::PtrStepSz<float> pKernel =
cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step);
// bind texture
cv::cuda::device::bindTexture<uchar>(&srcTex, pSrc);
const dim3 block(64, 2);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y));
hipLaunchKernelGGL(( imageFilteringGpu_tex), dim3(grid), dim3(block), 0, 0, pSrc, pDst, pKernel, border_size);
CV_CUDEV_SAFE_CALL(hipGetLastError());
CV_CUDEV_SAFE_CALL(hipDeviceSynchronize());
// unbind texture
CV_CUDEV_SAFE_CALL(hipUnbindTexture(srcTex));
}
double launchImageFilteringGpu
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size,
const int loop_num
)
{
double f = 1000.0f / cv::getTickFrequency();
int64 start = 0, end = 0;
double time = 0.0;
// run loop_num + 1 iterations; the first is a warm-up and is excluded from the average
for (int i = 0; i <= loop_num; i++){
start = cv::getTickCount();
launchImageFilteringGpu(src, dst, kernel, border_size);
end = cv::getTickCount();
time += (i > 0) ? ((end - start) * f) : 0;
}
time /= loop_num;
return time;
}
double launchImageFilteringGpu_ldg
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size,
const int loop_num
)
{
double f = 1000.0f / cv::getTickFrequency();
int64 start = 0, end = 0;
double time = 0.0;
for (int i = 0; i <= loop_num; i++){
start = cv::getTickCount();
launchImageFilteringGpu_ldg(src, dst, kernel, border_size);
end = cv::getTickCount();
time += (i > 0) ? ((end - start) * f) : 0;
}
time /= loop_num;
return time;
}
double launchImageFilteringGpu_tex
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size,
const int loop_num
)
{
double f = 1000.0f / cv::getTickFrequency();
int64 start = 0, end = 0;
double time = 0.0;
for (int i = 0; i <= loop_num; i++){
start = cv::getTickCount();
launchImageFilteringGpu_tex(src, dst, kernel, border_size);
end = cv::getTickCount();
time += (i > 0) ? ((end - start) * f) : 0;
}
time /= loop_num;
return time;
}
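// Hedged usage sketch (added for illustration; not part of the original source).
// It shows one way the launchers above could be driven from host code; the function
// name, the 5x5 box kernel and the 8-bit single-channel input are example
// assumptions rather than requirements of the launchers.
double exampleBoxFilterGpu(const cv::Mat& h_src, const int loop_num = 10)
{
    CV_Assert(h_src.type() == CV_8UC1);
    cv::Mat h_kernel = cv::Mat::ones(5, 5, CV_32FC1) / 25.0f; // normalized box filter
    const int border_size = h_kernel.rows / 2;
    cv::cuda::GpuMat d_src(h_src); // this constructor uploads the host data
    cv::cuda::GpuMat d_dst(h_src.size(), h_src.type());
    cv::cuda::GpuMat d_kernel(h_kernel);
    return launchImageFilteringGpu(d_src, d_dst, d_kernel, border_size, loop_num);
}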
| c62e98cfd725c469b35323cbb7ac70bb92736a31.cu | #include "imageFilteringGpu.cuh"
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/cudev.hpp>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
texture<uchar, cudaTextureType2D, cudaReadModeElementType> srcTex(false, cudaFilterModePoint, cudaAddressModeClamp);
__global__ void imageFilteringGpu
(
const cv::cudev::PtrStepSz<uchar> src,
cv::cudev::PtrStepSz<uchar> dst,
const cv::cudev::PtrStepSz<float> kernel,
const int border_size
)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y >= border_size) && y < (dst.rows-border_size)){
if((x >= border_size) && (x < (dst.cols-border_size))){
float sum = 0.0f;
for(int yy = 0; yy < kernel.rows; yy++){
for(int xx = 0; xx < kernel.cols; xx++){
sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], src.ptr(y+yy-border_size)[x+xx-border_size]));
}
}
dst.ptr(y)[x] = sum;
}
}
}
// use __ldg
__global__ void imageFilteringGpu_ldg
(
const cv::cudev::PtrStepSz<uchar> src,
cv::cudev::PtrStepSz<uchar> dst,
const cv::cudev::PtrStepSz<float> kernel,
const int border_size
)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y >= border_size) && y < (dst.rows-border_size)){
if((x >= border_size) && (x < (dst.cols-border_size))){
float sum = 0.0f;
for(int yy = 0; yy < kernel.rows; yy++){
const uchar* psrc = src.ptr(y+yy-border_size) + (x-border_size);
const float* pkernel = kernel.ptr(yy);
for(int xx = 0; xx < kernel.cols; xx++){
sum = __fadd_rn(sum, __fmul_rn(__ldg(&pkernel[xx]), __ldg(&psrc[xx])));
}
}
dst.ptr(y)[x] = sum;
}
}
}
// use texture
__global__ void imageFilteringGpu_tex
(
const cv::cudev::PtrStepSz<uchar> src,
cv::cudev::PtrStepSz<uchar> dst,
const cv::cudev::PtrStepSz<float> kernel,
const int border_size
)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y >= border_size) && (y < (dst.rows-border_size))){
if((x >= border_size) && (x < (dst.cols-border_size))){
float sum = 0.0f;
for(int yy = 0; yy < kernel.rows; yy++){
for(int xx = 0; xx < kernel.cols; xx++){
sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], tex2D(srcTex, x + xx - border_size, y + yy - border_size)));
}
}
dst.ptr(y)[x] = sum;
}
}
}
void launchImageFilteringGpu
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size
)
{
cv::cudev::PtrStepSz<uchar> pSrc =
cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step);
cv::cudev::PtrStepSz<uchar> pDst =
cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step);
cv::cudev::PtrStepSz<float> pKernel =
cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step);
const dim3 block(64, 2);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y));
imageFilteringGpu<<<grid, block>>>(pSrc, pDst, pKernel, border_size);
CV_CUDEV_SAFE_CALL(cudaGetLastError());
CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize());
}
// use __ldg
void launchImageFilteringGpu_ldg
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size
)
{
cv::cudev::PtrStepSz<uchar> pSrc =
cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step);
cv::cudev::PtrStepSz<uchar> pDst =
cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step);
cv::cudev::PtrStepSz<float> pKernel =
cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step);
const dim3 block(64, 2);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y));
imageFilteringGpu_ldg<<<grid, block>>>(pSrc, pDst, pKernel, border_size);
CV_CUDEV_SAFE_CALL(cudaGetLastError());
CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize());
}
// use texture
void launchImageFilteringGpu_tex
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size
)
{
cv::cudev::PtrStepSz<uchar> pSrc =
cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step);
cv::cudev::PtrStepSz<uchar> pDst =
cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step);
cv::cudev::PtrStepSz<float> pKernel =
cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step);
// bind texture
cv::cuda::device::bindTexture<uchar>(&srcTex, pSrc);
const dim3 block(64, 2);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y));
imageFilteringGpu_tex<<<grid, block>>>(pSrc, pDst, pKernel, border_size);
CV_CUDEV_SAFE_CALL(cudaGetLastError());
CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize());
// unbind texture
CV_CUDEV_SAFE_CALL(cudaUnbindTexture(srcTex));
}
double launchImageFilteringGpu
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size,
const int loop_num
)
{
double f = 1000.0f / cv::getTickFrequency();
int64 start = 0, end = 0;
double time = 0.0;
// run loop_num + 1 iterations; the first is a warm-up and is excluded from the average
for (int i = 0; i <= loop_num; i++){
start = cv::getTickCount();
launchImageFilteringGpu(src, dst, kernel, border_size);
end = cv::getTickCount();
time += (i > 0) ? ((end - start) * f) : 0;
}
time /= loop_num;
return time;
}
double launchImageFilteringGpu_ldg
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size,
const int loop_num
)
{
double f = 1000.0f / cv::getTickFrequency();
int64 start = 0, end = 0;
double time = 0.0;
for (int i = 0; i <= loop_num; i++){
start = cv::getTickCount();
launchImageFilteringGpu_ldg(src, dst, kernel, border_size);
end = cv::getTickCount();
time += (i > 0) ? ((end - start) * f) : 0;
}
time /= loop_num;
return time;
}
double launchImageFilteringGpu_tex
(
cv::cuda::GpuMat& src,
cv::cuda::GpuMat& dst,
cv::cuda::GpuMat& kernel,
const int border_size,
const int loop_num
)
{
double f = 1000.0f / cv::getTickFrequency();
int64 start = 0, end = 0;
double time = 0.0;
for (int i = 0; i <= loop_num; i++){
start = cv::getTickCount();
launchImageFilteringGpu_tex(src, dst, kernel, border_size);
end = cv::getTickCount();
time += (i > 0) ? ((end - start) * f) : 0;
}
time /= loop_num;
return time;
}
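// Hedged CPU reference (added for illustration; not part of the original source).
// A scalar implementation matching the GPU kernels above for a single-channel 8-bit
// image, useful for validating their output: only pixels at least border_size away
// from every edge are written, and the float sum is narrowed to uchar just as the
// kernels do implicitly.
void imageFilteringCpuReference(const cv::Mat& src, cv::Mat& dst, const cv::Mat& kernel, const int border_size)
{
    for (int y = border_size; y < dst.rows - border_size; y++){
        for (int x = border_size; x < dst.cols - border_size; x++){
            float sum = 0.0f;
            for (int yy = 0; yy < kernel.rows; yy++){
                for (int xx = 0; xx < kernel.cols; xx++){
                    sum += kernel.at<float>(yy, xx) * src.at<uchar>(y + yy - border_size, x + xx - border_size);
                }
            }
            dst.at<uchar>(y, x) = static_cast<uchar>(sum);
        }
    }
}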
|
9cb5a0202d519daf9c29dcc49bf2567caa4ad9b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <cmath>
#include <math.h>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <cstring>
#include <string>
#include <algorithm>
#include <random>
#include <numeric>
#include <time.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
//#include <thrust/sort.h>
//#include <thrust/execution_policy.h>
//bacteria surface geometry, UUU boundary semi-circle, Brownian noise
//Last updated: March 10, 2021
using namespace std;
//set random seed for uniform distribution for angles
unsigned int seed = time(NULL);
default_random_engine engine(seed);
#define THREADS_PER_BLOCK 128
#define TILE_SIZE 128
#define PI 3.14159265358979
#define K_B 1.38064852E-23 //m^2 kg s^-2 K^-1
#define DIM 3
//====================================================================================
//Returns the inverse parallel geometric factor for the
// translational friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_parallel_geo_factor(double a)
{
double inverse_parallel = (log(a) - 0.207 + 0.980 / a - 0.133 / (a * a))
* (1.0 / (2.0 * PI * a));
return inverse_parallel;
}
//Returns the inverse perpendicular geometric factor for the
// translational friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_perpendicular_geo_factor(double a)
{
double inverse_perp = (log(a) + 0.839 + 0.185 / a + 0.233 / (a * a))
* ( 1.0 / (4.0 * PI * a));
return inverse_perp;
}
//Returns the rotational geometric factor for the
// rotation friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_rotation_geo_factor(double a)
{
double inverse_rotation = (log(a) - 0.662 + 0.917 / a - 0.050 / (a * a))
* ( 3.0 / (PI * a * a));
return inverse_rotation;
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, hiprandState_t *state) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(seed, id, 0, &state[id]);
}
__global__ void init(unsigned int seed, hiprandStatePhilox4_32_10_t *state) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(seed, id, 0, &state[id]);
}
__global__ void generate_random_numbers_noise(hiprandStatePhilox4_32_10_t *state, float4 *numbers) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Copy state to local memory for efficiency */
hiprandStatePhilox4_32_10_t localState = state[id];
numbers[id] = hiprand_normal4(&localState);
/* Copy state back to global memory */
state[id] = localState;
}
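//Surface geometry helpers: ys_fcn returns the wall height ys at horizontal position xs
//for the periodic chain of semi-circular bumps of radius R, offset by +/-C for the top
//or bottom wall (the sign follows the side the bacterium is on). dysdxs_fcn and
//d2ysdxs2_fcn are the first and second derivatives of that profile; func/dfunc_dx give
//the stationarity condition (and its derivative) whose root, located by bisection in
//PointOnSurface, is the surface point closest to the bacterium centroid.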
__device__ double ys_fcn(double4 xa, double xs, double R, double C)
{
double ys;
double param = (2.0 * R / PI) * acos(cos(PI * xs / (2.0 * R))) - R;
if (xa.y > 0.0)
{
ys = sqrt(R * R - param * param) + C;
}
else
{
ys = -sqrt(R * R - param * param) - C;
}
return ys;
}
__device__ double dysdxs_fcn(double xs, double ys, double R)
{
double dysdxs;
double numer = R * (PI - 2.0 * acos(cos(PI * xs / (2.0 * R)))) * sin(PI * xs / (2.0 * R));
double denom = sqrt(2.0) * sqrt(R * R * (PI - acos(cos(PI * xs / (2.0 * R)))) * acos(cos(PI * xs / (2.0 * R)))) *
sqrt(1.0 - cos(PI * xs / R));
if (ys > 0)
{
dysdxs = numer / denom;
}
else
{
dysdxs = -numer / denom;
}
return dysdxs;
}
__device__ double d2ysdxs2_fcn(double xs, double ys, double R)
{
double d2ysdxs2;
double numer = PI * PI * PI * R * R;
double denom_param1 = (asin(cos(PI * xs / (2.0 * R)))) * (asin(cos(PI * xs / (2.0 * R))));
double denom_param2 = sqrt(R * R * (PI * PI - 4.0 * denom_param1));
double denom = denom_param2 * denom_param2 * denom_param2;
if (ys > 0)
{
d2ysdxs2 = -numer / denom;
}
else
{
d2ysdxs2 = numer / denom;
}
return d2ysdxs2;
}
__device__ double func(double4 xa, double xs, double R, double C)
{
double ys = ys_fcn(xa, xs, R, C);
double dysdxs = dysdxs_fcn(xs, ys, R);
double func_value = -xa.x + xs + (ys - xa.y) * dysdxs;
return func_value;
}
__device__ double dfunc_dx(double4 xa, double xs, double R, double C)
{
double ys = ys_fcn(xa, xs, R, C);
double dysdxs = dysdxs_fcn(xs, ys, R);
double d2ysdxs2 = d2ysdxs2_fcn(xs, ys, R);
double dfunc_dx_value = 1.0 + dysdxs * dysdxs + (ys - xa.y) * d2ysdxs2;
return dfunc_dx_value;
}
__device__ double3 SurfaceNormal(double xs, double ys, double R)
{
double dysdxs = dysdxs_fcn( xs, ys, R );
double denom = sqrt(dysdxs * dysdxs + 1.0);
double dysplusdxs = dysdxs_fcn( xs, 1.0, R );
double3 N;
N.x = dysplusdxs / denom;
N.z = 0.0;
if (ys > 0.0)
{
N.y = -1.0 / denom;
}
else
{
N.y = 1.0 / denom;
}
return N;
}
__device__ double bisection(double xl, double xu, double4 xa, double R, double C)
{
double es = 0.5; //percent
int imax = 20;
double fl = func(xa, xl, R, C);
double fu = func(xa, xu, R, C);
double xs;
if (fl * fu < 0.0)
{
double ea = 1.0;
int iter = 0;
double xr = xl;
double test, fr, xrold;
while (iter < imax && ea > es)
{
xrold = xr;
xr = (xl + xu) / 2.0;
fr = func(xa, xr, R, C);
test = fl * fr;
ea = abs((xr - xrold) / xr) * 100.0;
if (test < 0.0)
{
xu = xr;
}
else
{
xl = xr;
fl = fr;
}
iter++;
}
xs = xr;
}
else
{
xs = sqrt(-1.0);
}
return xs;
}
__device__ bool isNaN(double s)
{
// http.developer.nvidia.com/Cg/isnan.html
return s != s;
}
__device__ double3 PointOnSurface(double4 xa, double R, double C)
{
double xlower_bound, xupper_bound;
double xl1, xu1, xs1, xl2, xu2, xs2, chck1, chck2, xs, ys;
double3 S;
//bounds are always set to be within the semi-circle the point is in
double x_star = fmod(xa.x, (2.0 * R));
xlower_bound = xa.x - 0.99 * x_star;
xupper_bound = xa.x + 0.99 * ((2.0 * R) - x_star);
xl1 = xlower_bound;
xu1 = xa.x;
xl2 = xa.x;
xu2 = xupper_bound;
//bisection on each section:
xs1 = bisection(xl1, xu1, xa, R, C);
xs2 = bisection(xl2, xu2, xa, R, C);
//check roots
if (isNaN(xs1) == 1)
{
chck1 = -1;
}
else
{
chck1 = dfunc_dx(xa, xs1, R, C);
}
if (isNaN(xs2) == 1)
{
chck2 = -1;
}
else
{
chck2 = dfunc_dx(xa, xs2, R, C);
}
if (chck1 > 0)
{
xs = xs1;
}
else if (chck2 > 0)
{
xs = xs2;
}
else
{
xs = xa.x;
}
ys = ys_fcn(xa, xs, R, C);
S.x = xs;
S.y = ys;
S.z = 0.0;
return S;
}
__device__ double2 dxsys_dxa(double3 S, double R, int i)
{
double2 dxsysdxa;
double dysdxs = dysdxs_fcn(S.x, S.y, R);
if (i == 1)
{
dxsysdxa.x = 1.0;
dxsysdxa.y = 1.0 / dysdxs;
}
else if (i == 2)
{
dxsysdxa.x = dysdxs;
dxsysdxa.y = 1.0;
}
return dxsysdxa;
}
__device__ double2 dN_dxs(double xs, double ys, double R)
{
double2 dNdxs;
dNdxs.x = - 1.0 / R;
double numer1 = sqrt(1.0 / (PI * PI - 4.0 * (asin(cos(PI * xs / (2.0 * R)))) * (asin(cos(PI * xs / (2.0 * R)))) ));
double numer = 4.0 * asin(cos(PI * xs / (2.0 * R))) * numer1 * sin(PI * xs / (2.0 * R));
double denom = R * sqrt(2.0 - 2.0 * cos(PI * xs / R));
dNdxs.y = - numer / denom;
if (ys < 0.0)
{
dNdxs.y = numer / denom;
}
return dNdxs;
}
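//calculate_BodyWallInteraction: for each bacterium, evaluate the overlap measure
//r_alpha = la*|n.N| + da - (x - S).N against the nearest wall point S with unit normal
//N and, when r_alpha > 0, the gradients of the steric wall repulsion
//U_bdy = exp(r_alpha / sigma_bdy) with respect to the centroid (d_dUbdy_dxa) and the
//orientation (d_dUbdy_dna). A nearly vertical bacterium close to a bump peak is instead
//treated against a flat wall, with the resulting force scaled down by 0.01.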
__global__ void calculate_BodyWallInteraction(double3 *d_dUbdy_dxa,
double3 *d_dUbdy_dna, double4 *d_x, double4 *d_n,
double sigma_bdy, double R, double C, int N)
{
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < N)
{
double3 dUbdy_dxa, dUbdy_dna;
dUbdy_dxa.x = 0.0;
dUbdy_dxa.y = 0.0;
dUbdy_dxa.z = 0.0;
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
double4 xa = d_x[gtid];
double4 na = d_n[gtid];
double la = xa.w;
double da = na.w;
double y = ys_fcn(xa, xa.x, R, C);
double chk1 = abs(xa.y);
double chk2 = abs(y) - (la + da);
if (chk1 > chk2)
{
double tol = 1.0 * da; //tolerance of one half-diameter
double x_star = fmod(xa.x, (2.0 * R));
if (x_star <= tol || ((2.0 * R) - x_star) <= tol) //bacteria is near peak
{
if (abs(1.0 - abs(na.y)) < 0.2) //bacteria near peak and vertical => treat like flat boundary
{
double3 S, W_hat;
if (xa.y <= 0.0) //bottom surface
{
W_hat.x = 0.0;
W_hat.y = 1.0;
W_hat.z = 0.0;
S.x = xa.x;
S.y = -abs(C);
S.z = 0.0;
}
else // top surface
{
W_hat.x = 0.0;
W_hat.y = -1.0;
W_hat.z = 0.0;
S.x = xa.x;
S.y = abs(C);
S.z = 0.0;
}
double dot_na_W_hat, dot_xa_W_hat, dot_W_hat_S, r_alpha;
dot_na_W_hat = na.x * W_hat.x + na.y * W_hat.y + na.z * W_hat.z;
dot_xa_W_hat = xa.x * W_hat.x + xa.y * W_hat.y + xa.z * W_hat.z;
dot_W_hat_S = W_hat.x * S.x + W_hat.y * S.y + W_hat.z * S.z;
r_alpha = la * abs(dot_na_W_hat) + da - dot_xa_W_hat + dot_W_hat_S;
double dUbdy_dralpha;
double3 dralpha_dna;
if (r_alpha > 0.0) //contact with boundary
{
dUbdy_dralpha = 0.01 * (1.0 / sigma_bdy) * exp(r_alpha / sigma_bdy);
//0.01 factor to reduce the effect of the flat boundary
//boundary force derivatives:
dUbdy_dxa.x = dUbdy_dralpha * -W_hat.x;
dUbdy_dxa.y = dUbdy_dralpha * -W_hat.y;
dUbdy_dxa.z = dUbdy_dralpha * -W_hat.z;
//boundary orientation derivatives:
if (dot_na_W_hat == 0.0)
{
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
}
else
{
dralpha_dna.x = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.x;
dralpha_dna.y = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.y;
dralpha_dna.z = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.z;
dUbdy_dna.x = dUbdy_dralpha * dralpha_dna.x;
dUbdy_dna.y = dUbdy_dralpha * dralpha_dna.y;
dUbdy_dna.z = dUbdy_dralpha * dralpha_dna.z;
}
}
}
}
else
{
double3 S, Nhat; // point on the surface closest to bacteria
S = PointOnSurface(xa, R, C);
Nhat = SurfaceNormal(S.x, S.y, R);
double3 xa_S;
xa_S.x = xa.x - S.x;
xa_S.y = xa.y - S.y;
xa_S.z = xa.z - S.z;
double dot_na_Nhat, dot_Nhat_xa_S, r_alpha;
dot_na_Nhat = na.x * Nhat.x + na.y * Nhat.y + na.z * Nhat.z;
dot_Nhat_xa_S = Nhat.x * xa_S.x + Nhat.y * xa_S.y + Nhat.z * xa_S.z;
r_alpha = la * abs(dot_na_Nhat) + da - dot_Nhat_xa_S;
if (r_alpha > 0.0) //contact with boundary
{
double dUbdy_dralpha;
double3 dralpha_dna, dralpha_dxa;
dUbdy_dralpha = (1.0 / sigma_bdy) * exp(r_alpha / sigma_bdy);
double2 dNdxs = dN_dxs(S.x, S.y, R);
double2 dxys_dxa1 = dxsys_dxa(S, R, 1);
double2 dxys_dxa2 = dxsys_dxa(S, R, 2);
double dysdxs = dysdxs_fcn(S.x, S.y, R);
double dxsdys = 1.0 / dysdxs;
double c1 = la * dot_na_Nhat / abs(dot_na_Nhat);
double c2 = (xa.x - S.x) * dNdxs.x;
double c3 = (xa.y - S.y) * dNdxs.y;
dralpha_dxa.x = c1 * (na.x * dNdxs.x + na.y * dNdxs.y) * dxys_dxa1.x
- (c2 + c3) * dxys_dxa1.x
- Nhat.x
+ (Nhat.x + Nhat.y * dysdxs) * dxys_dxa1.x
+ (Nhat.x * dxsdys + Nhat.y) * dxys_dxa1.y;
dralpha_dxa.y = c1 * (na.x * dNdxs.x + na.y * dNdxs.y) * dxys_dxa2.x
- (c2 + c3) * dxys_dxa2.x
- Nhat.y
+ (Nhat.x + Nhat.y * dysdxs) * dxys_dxa2.x
+ (Nhat.x * dxsdys + Nhat.y) * dxys_dxa2.y;
dralpha_dxa.z = 0.0;
//boundary force derivatives:
dUbdy_dxa.x = dUbdy_dralpha * dralpha_dxa.x;
dUbdy_dxa.y = dUbdy_dralpha * dralpha_dxa.y;
dUbdy_dxa.z = dUbdy_dralpha * dralpha_dxa.z;
//boundary orientation derivatives:
if (dot_na_Nhat == 0.0)
{
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
}
else
{
dralpha_dna.x = c1 * Nhat.x;
dralpha_dna.y = c1 * Nhat.y;
dralpha_dna.z = c1 * Nhat.z;
dUbdy_dna.x = dUbdy_dralpha * dralpha_dna.x;
dUbdy_dna.y = dUbdy_dralpha * dralpha_dna.y;
dUbdy_dna.z = dUbdy_dralpha * dralpha_dna.z;
}
}
}
}
// Save the result in global memory for the integration step
d_dUbdy_dxa[gtid] = dUbdy_dxa;
d_dUbdy_dna[gtid] = dUbdy_dna;
}
}
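//time_marching: explicit Euler-Maruyama update with run-and-tumble switching. A
//tumbling cell is held in place for delta_tumble and then rotated in-plane by a
//Gaussian angle (mean avg_n_tumble, std std_n_tumble, random sign). A running cell
//advances by self-propulsion n*dt plus the wall force mapped through the anisotropic
//mobility Gamma_inverse plus translational noise; its orientation relaxes through the
//projector (I - nn) with rotational noise and is re-normalized. Finally x is wrapped
//periodically onto [0, L].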
__global__ void time_marching(double4 *d_x, double4 *d_n,
double3 *d_dUbdy_dxa, double3 *d_dUbdy_dna,
double epsilon_r,
double inverse_Pe_T, double inverse_Pe_parallel, double inverse_Pe_perp, double inverse_Pe_R,
double dt, int N, double L,
double *d_t_run, double *d_t_tumble, int *d_tumble_flag,
double delta_run, double delta_tumble,
double avg_n_tumble, double std_n_tumble,
hiprandState_t *state, float4 *d_random_numbers_noise)
{
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < N)
{
double4 xa = d_x[gtid];
double4 na = d_n[gtid];
float4 random_numbers_noise = d_random_numbers_noise[gtid];
double la = xa.w;
double da = na.w;
double3 dUbdy_dxa = d_dUbdy_dxa[gtid];
double3 dUbdy_dna = d_dUbdy_dna[gtid];
double4 x_new;
double4 n_new;
//-----Start: creating orientation and orientation projection matrix-----
double ori_matrix[DIM][DIM];
ori_matrix[0][0] = na.x * na.x;
ori_matrix[1][1] = na.y * na.y;
ori_matrix[2][2] = na.z * na.z;
ori_matrix[0][1] = na.x * na.y;
ori_matrix[0][2] = na.x * na.z;
ori_matrix[1][2] = na.y * na.z;
ori_matrix[1][0] = ori_matrix[0][1];
ori_matrix[2][0] = ori_matrix[0][2];
ori_matrix[2][1] = ori_matrix[1][2];
double ori_proj_matrix[DIM][DIM];
ori_proj_matrix[0][0] = 1.0 - na.x * na.x;
ori_proj_matrix[1][1] = 1.0 - na.y * na.y;
ori_proj_matrix[2][2] = 1.0 - na.z * na.z;
ori_proj_matrix[0][1] = 0.0 - na.x * na.y;
ori_proj_matrix[0][2] = 0.0 - na.x * na.z;
ori_proj_matrix[1][2] = 0.0 - na.y * na.z;
ori_proj_matrix[1][0] = ori_proj_matrix[0][1];
ori_proj_matrix[2][0] = ori_proj_matrix[0][2];
ori_proj_matrix[2][1] = ori_proj_matrix[1][2];
//-----End: creating orientation and orientation projection matrix-----
//-----Start: time-marching + tumbling dynamics-----
if (d_tumble_flag[gtid] == 1) //tumbling
{
d_t_tumble[gtid] += dt;
if (d_t_tumble[gtid] < delta_tumble) //don't move
{
x_new.x = xa.x;
x_new.y = xa.y;
x_new.z = xa.z;
x_new.w = xa.w;
n_new.x = na.x;
n_new.y = na.y;
n_new.z = na.z;
n_new.w = na.w;
}
else //tumble
{
d_tumble_flag[gtid] = 0;
d_t_tumble[gtid] = 0.0;
float angle;
double rad_angle;
hiprandState_t localState = state[gtid];
angle = hiprand_normal(&localState);
angle = angle * std_n_tumble + avg_n_tumble;
while (angle < 0.0 || angle > 180.0)
{
angle = hiprand_normal(&localState);
angle = angle * std_n_tumble + avg_n_tumble;
}
double uniform1 = hiprand_uniform(&localState); //number between 0 and 1
if (uniform1 < 0.5) //otherwise angle is positive
{
angle = -angle;
}
state[gtid] = localState;
rad_angle = angle * PI / 180; //convert to radians
//rotation matrix
double R[2][2];
R[0][0] = cos(rad_angle);
R[0][1] = -sin(rad_angle);
R[1][0] = sin(rad_angle);
R[1][1] = cos(rad_angle);
n_new.x = R[0][0] * na.x + R[0][1] * na.y;
n_new.y = R[1][0] * na.x + R[1][1] * na.y;
n_new.z = 0.0;
n_new.w = da;
x_new.x = xa.x;
x_new.y = xa.y;
x_new.z = xa.z;
x_new.w = xa.w;
}
}
else //run
{
d_t_run[gtid] += dt;
if (d_t_run[gtid] < delta_run) //run
{
//translational dynamics:
//calculating geometric factors:
double aspect = la/da;
double inverse_parallel = inverse_parallel_geo_factor(aspect);
double inverse_perp = inverse_perpendicular_geo_factor(aspect);
double inverse_rotation = inverse_rotation_geo_factor(aspect);
//-----Start: creating Gamma_inverse matrix-----
double Gamma_inverse[DIM][DIM];
for(int i = 0; i < DIM; i++)
{
for(int j = 0; j < DIM; j++)
{
Gamma_inverse[i][j] = inverse_parallel * ori_matrix[i][j]
+ inverse_perp * ori_proj_matrix[i][j];
}
}
//-----End: creating Gamma_inverse matrix-----
//-----Start: creating translational diffusion matrix-----
double Pe_trans_matrix[DIM][DIM];
double sqrt_Pe_inverse_parallel = sqrt(inverse_Pe_parallel);
double sqrt_Pe_inverse_perp = sqrt(inverse_Pe_perp);
for(int i = 0; i < DIM; i++)
{
for(int j = 0; j < DIM; j++)
{
Pe_trans_matrix[i][j] = sqrt_Pe_inverse_parallel * ori_matrix[i][j]
+ sqrt_Pe_inverse_perp * ori_proj_matrix[i][j];
}
}
//-----End: creating translational diffusion matrix-----
//adding it all together:
double3 x_b;
x_b.x = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.x;
x_b.y = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.y;
x_b.z = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.z;
//matrix multiply:
double3 Gamma_inverse_x_b;
Gamma_inverse_x_b.x = Gamma_inverse[0][0] * x_b.x
+ Gamma_inverse[0][1] * x_b.y
+ Gamma_inverse[0][2] * x_b.z;
Gamma_inverse_x_b.y = Gamma_inverse[1][0] * x_b.x
+ Gamma_inverse[1][1] * x_b.y
+ Gamma_inverse[1][2] * x_b.z;
Gamma_inverse_x_b.z = Gamma_inverse[2][0] * x_b.x
+ Gamma_inverse[2][1] * x_b.y
+ Gamma_inverse[2][2] * x_b.z;
//noise:
float3 d_xi;
d_xi.x = random_numbers_noise.x * sqrt(2.0 * dt);
d_xi.y = random_numbers_noise.y * sqrt(2.0 * dt);
d_xi.z = 0.0;
float3 trans_noise;
trans_noise.x = Pe_trans_matrix[0][0] * d_xi.x
+ Pe_trans_matrix[0][1] * d_xi.y
+ Pe_trans_matrix[0][2] * d_xi.z;
trans_noise.y = Pe_trans_matrix[1][0] * d_xi.x
+ Pe_trans_matrix[1][1] * d_xi.y
+ Pe_trans_matrix[1][2] * d_xi.z;
trans_noise.z = Pe_trans_matrix[2][0] * d_xi.x
+ Pe_trans_matrix[2][1] * d_xi.y
+ Pe_trans_matrix[2][2] * d_xi.z;
//time step:
x_new.x = xa.x + na.x * dt + Gamma_inverse_x_b.x * dt + trans_noise.x;
x_new.y = xa.y + na.y * dt + Gamma_inverse_x_b.y * dt + trans_noise.y;
x_new.z = 0.0;
x_new.w = la;
//orientation dynamics
double3 n_b;
int dim = 2;
n_b.x = - epsilon_r * (inverse_Pe_R) * inverse_rotation * dUbdy_dna.x + (1 - dim) * (inverse_Pe_R) * na.x;
n_b.y = - epsilon_r * (inverse_Pe_R) * inverse_rotation * dUbdy_dna.y + (1 - dim) * (inverse_Pe_R) * na.y;
n_b.z = - epsilon_r * (inverse_Pe_R) * inverse_rotation * dUbdy_dna.z + (1 - dim) * (inverse_Pe_R) * na.z;
double3 ori_proj_n_b;
ori_proj_n_b.x = ori_proj_matrix[0][0] * n_b.x
+ ori_proj_matrix[0][1] * n_b.y
+ ori_proj_matrix[0][2] * n_b.z;
ori_proj_n_b.y = ori_proj_matrix[1][0] * n_b.x
+ ori_proj_matrix[1][1] * n_b.y
+ ori_proj_matrix[1][2] * n_b.z;
ori_proj_n_b.z = ori_proj_matrix[2][0] * n_b.x
+ ori_proj_matrix[2][1] * n_b.y
+ ori_proj_matrix[2][2] * n_b.z;
//noise:
float3 d_zeta;
d_zeta.x = random_numbers_noise.z * sqrt(2.0 * (inverse_Pe_R) * dt);
d_zeta.y = random_numbers_noise.w * sqrt(2.0 * (inverse_Pe_R) * dt);
d_zeta.z = 0.0;
double3 ori_noise;
ori_noise.x = ori_proj_matrix[0][0] * d_zeta.x
+ ori_proj_matrix[0][1] * d_zeta.y
+ ori_proj_matrix[0][2] * d_zeta.z;
ori_noise.y = ori_proj_matrix[1][0] * d_zeta.x
+ ori_proj_matrix[1][1] * d_zeta.y
+ ori_proj_matrix[1][2] * d_zeta.z;
ori_noise.z = ori_proj_matrix[2][0] * d_zeta.x
+ ori_proj_matrix[2][1] * d_zeta.y
+ ori_proj_matrix[2][2] * d_zeta.z;
n_new.x = na.x + ori_proj_n_b.x * dt + ori_noise.x;
n_new.y = na.y + ori_proj_n_b.y * dt + ori_noise.y;
n_new.z = 0.0;
n_new.w = da;
}
else
{
d_tumble_flag[gtid] = 1;
d_t_run[gtid] = 0.0;
x_new.x = xa.x;
x_new.y = xa.y;
x_new.z = xa.z;
x_new.w = xa.w;
n_new.x = na.x;
n_new.y = na.y;
n_new.z = na.z;
n_new.w = na.w;
}
}
//-----End: time-marching + tumbling dynamics-----
//normalize n afterwards:
double magn_n_new_Sqrd = n_new.x * n_new.x + n_new.y * n_new.y + n_new.z * n_new.z;
double magn_n_new = sqrt(magn_n_new_Sqrd);
n_new.x = (n_new.x / magn_n_new);
n_new.y = (n_new.y / magn_n_new);
n_new.z = (n_new.z / magn_n_new);
//periodic BC
if (x_new.x < 0.0)
{
x_new.x = L + x_new.x; //x_new.x is negative here, so this wraps to L - |x_new.x|
}
else if (x_new.x > L)
{
double delta_x = x_new.x - L;
x_new.x = delta_x;
}
// Save the result in global memory
d_x[gtid] = x_new;
d_n[gtid] = n_new;
}
}
//returns the greatest common divisor of two numbers
int gcd(int first_number, int second_number)
{
int gcd_value;
for(int i = 1; i <= first_number && i <= second_number; i++)
{
if(first_number % i == 0 && second_number % i == 0 )
{
gcd_value = i;
}
}
return gcd_value;
}
//loads the .txt file that contains the simulation input variables data
void load_textfile_sim_parameters( char filename[],
int& sim_num, int& case_num,
double& dt, double& time_save, double& start_time, double& final_time,
int& N, double& l, double& d,
double& C, double& L, double& R,
double& epsilon_r, double& sigma_bdy,
double& inverse_Pe_T, double& inverse_Pe_parallel, double& inverse_Pe_perp, double& inverse_Pe_R,
double& delta_run, double& delta_tumble, double& avg_n_tumble, double& std_n_tumble)
{
ifstream infile(filename);
if (infile.fail())
{
cout<<"\nSimulation parameters input file opening failed.\n";
exit(1);
}
const int number_inputs = 22;
double input_vec[number_inputs];
for (int i = 0; i < number_inputs; i++)
{
infile >> input_vec[i];
}
int i = 0;
sim_num = int(input_vec[i]);
case_num = int(input_vec[++i]);
dt = input_vec[++i];
time_save = input_vec[++i];
start_time = input_vec[++i];
final_time = input_vec[++i];
N = int(input_vec[++i]);
l = input_vec[++i];
d = input_vec[++i];
C = input_vec[++i];
L = input_vec[++i];
R = input_vec[++i];
epsilon_r = input_vec[++i];
sigma_bdy = input_vec[++i];
inverse_Pe_T = input_vec[++i];
inverse_Pe_parallel = input_vec[++i];
inverse_Pe_perp = input_vec[++i];
inverse_Pe_R = input_vec[++i];
delta_run = input_vec[++i];
delta_tumble = input_vec[++i];
avg_n_tumble = input_vec[++i];
std_n_tumble = input_vec[++i];
cout << "\nSimulation parameters loaded\n";
}
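//Expected layout of bacteria_surface_input.txt: the 22 values read above, whitespace
//separated, in exactly this order:
// sim_num case_num dt time_save start_time final_time N l d C L R epsilon_r sigma_bdy
// inverse_Pe_T inverse_Pe_parallel inverse_Pe_perp inverse_Pe_R delta_run delta_tumble
// avg_n_tumble std_n_tumble
//Illustrative example (values are placeholders, not taken from the original source):
// 1 1 1e-4 0.1 0.0 10.0 128 2.0 0.5 10.0 4.0 5.0 1.0 0.1 0.01 0.01 0.01 0.1 1.0 0.1 68.0 36.0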
void initial_loading(double4 x[], double4 n[], int N, double C, double L,
double l, double d, double t_run[], double t_tumble[], double delta_run, double simulation_time)
{
double factorL = 1.0;
double factorLminus1 = 1.0 - factorL;
double xmin = 0.0 + 0.5 * factorLminus1 * L;
double xmax = L - 0.5 * factorLminus1 * L;
double ymin = -C;
double ymax = C;
uniform_real_distribution<double> uniform_x(xmin, xmax);
uniform_real_distribution<double> uniform_y(ymin, ymax);
uniform_real_distribution<double> uniform_dist_angle(0, 2.0 * PI );
uniform_real_distribution<double> uniform_dist_run_time(0.0, delta_run);
double angle;
for(int alpha = 0; alpha < N; alpha++)
{
//set bacteria dimensions:
x[alpha].w = l;
n[alpha].w = d;
//set initial positions
x[alpha].x = uniform_x(engine);
x[alpha].y = uniform_y(engine);
x[alpha].z = 0.0;
//set initial bacteria orientations:
angle = uniform_dist_angle(engine);
n[alpha].x = cos(angle);
n[alpha].y = sin(angle);
n[alpha].z = 0.0;
//set initial run time
if (delta_run < simulation_time) {
t_run[alpha] = uniform_dist_run_time(engine);
}
else {
t_run[alpha] = 0.0;
}
//set initial tumble time
t_tumble[alpha] = 0.0;
}
return;
}
//Returns the eigenvectors corresponding to the orientation vectors for
// all the bacteria.
void eigenvectors_ellipsoid(double eigenvectors[][DIM*DIM], double4 n[], int N)
{
for (int alpha = 0; alpha < N; alpha++)
{
if (n[alpha].x == 1.0)
{
//v1:
eigenvectors[alpha][0] = 1.0;
eigenvectors[alpha][1] = 0.0;
eigenvectors[alpha][2] = 0.0;
//v2:
eigenvectors[alpha][3] = 0.0;
eigenvectors[alpha][4] = 1.0;
eigenvectors[alpha][5] = 0.0;
//v3:
eigenvectors[alpha][6] = 0.0;
eigenvectors[alpha][7] = 0.0;
eigenvectors[alpha][8] = 1.0;
}
else if (n[alpha].x == -1.0)
{
//v1:
eigenvectors[alpha][0] = -1.0;
eigenvectors[alpha][1] = 0.0;
eigenvectors[alpha][2] = 0.0;
//v2:
eigenvectors[alpha][3] = 0.0;
eigenvectors[alpha][4] = -1.0;
eigenvectors[alpha][5] = 0.0;
//v3:
eigenvectors[alpha][6] = 0.0;
eigenvectors[alpha][7] = 0.0;
eigenvectors[alpha][8] = 1.0;
}
else
{
//v1:
eigenvectors[alpha][0] = n[alpha].x;
eigenvectors[alpha][1] = n[alpha].y;
eigenvectors[alpha][2] = n[alpha].z;
double denom = sqrt(1.0 - n[alpha].x * n[alpha].x );
//v2:
eigenvectors[alpha][3] = 0.0;
eigenvectors[alpha][4] = -n[alpha].z / denom;
eigenvectors[alpha][5] = n[alpha].y / denom;
//v3:
eigenvectors[alpha][6] = 1.0 - n[alpha].x * n[alpha].x;
eigenvectors[alpha][7] = -(n[alpha].x * n[alpha].y) / denom;
eigenvectors[alpha][8] = -(n[alpha].x * n[alpha].z) / denom;
}
}
return;
}
//Prints simulation input to file
void print_to_file_input(
int sim_num, int case_num,
double dt, double time_save, double start_time, double final_time,
int N, double l, double d,
double C, double L, double R,
double epsilon_r, double sigma_bdy,
double inverse_Pe_T, double inverse_Pe_parallel, double inverse_Pe_perp, double inverse_Pe_R,
double delta_run, double delta_tumble, double avg_n_tumble, double std_n_tumble)
{
ofstream fout;
char file_name2[100];
sprintf(file_name2,"SimulationInput.txt");
fout.open(file_name2);
if (fout.fail())
{
cout<<"Output file opening failed.\n";
exit(1);
}
fout.setf(ios::fixed);
fout.setf(ios::showpoint);
fout.precision(30);
string headers("sim_num, case_num, dt, time_save, start_time, final_time, N, l, d, C, L, R, epsilon_r, sigma_bdy, inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R, delta_run, delta_tumble, avg_n_tumble, std_n_tumble");
fout << headers << endl;
fout << sim_num << ", "
<< case_num << ", "
<< dt << ", "
<< time_save << ", "
<< start_time << ", "
<< final_time << ", "
<< N << ", "
<< l << ", "
<< d << ", "
<< C << ", "
<< L << ", "
<< R << ", "
<< epsilon_r << ", "
<< sigma_bdy << ", "
<< inverse_Pe_T << ", "
<< inverse_Pe_parallel << ", "
<< inverse_Pe_perp << ", "
<< inverse_Pe_R << ", "
<< delta_run << ", "
<< delta_tumble << ", "
<< avg_n_tumble << ", "
<< std_n_tumble << endl;
fout.close();
return;
}
//Prints output to file
void print_to_file_output(int sim_num, int case_num, int itime, int N,
double4 x[], double4 n[], double t_run[])
{
double eig_vec[N][DIM * DIM]; //dimensionless Cartesian vector components of the eigenvectors for the orientation of the bacteria
eigenvectors_ellipsoid(eig_vec, n, N);
ofstream fout;
char file_name2[100];
sprintf(file_name2,"sim%d_case%d_timestep%015d.txt", sim_num, case_num, itime);
fout.open(file_name2);
if (fout.fail())
{
cout<<"Output file opening failed.\n";
exit(1);
}
fout.setf(ios::fixed);
fout.setf(ios::showpoint);
fout.precision(15);
string headers("Centroid_1, Centroid_2, Centroid_3, DirVector1_1, DirVector1_2, DirVector1_3, DirVector2_1, DirVector2_2, DirVector2_3, DirVector3_1, DirVector3_2, DirVector3_3, SemiAxis1, SemiAxis2, SemiAxis3, tRun");
fout << headers << endl;
for (int alpha = 0; alpha < N; alpha++)
{
fout << x[alpha].x << ", "
<< x[alpha].y << ", "
<< x[alpha].z << ", ";
for (int nCol = 0; nCol < DIM*DIM; nCol++)
{
fout << eig_vec[alpha][nCol] << ", ";
}
fout << x[alpha].w << ", "
<< n[alpha].w << ", "
<< n[alpha].w << ", "
<< t_run[alpha] << endl;
}
fout.close();
return;
}
//====================================================================================
int main(void)
{
//-----Start: simulation input-----
int sim_num; //simulation number
int case_num; //case number
double dt; //dimensionless time step
double time_save; //dimensionless time at which to output
double start_time; //dimensionless start time of simulation
double final_time; //dimensionless final time of simulation
int N; //number of bacteria in simulation
double l; //half-length of bacteria
double d; //half-diameter of bacteria
double C; //wall surface displacement from origin
double L; //wall length (a multiple of lambda)
double R;
double epsilon_r;
double sigma_bdy; //range parameter for bacteria-wall steric repulsion
double inverse_Pe_T;
double inverse_Pe_parallel;
double inverse_Pe_perp;
double inverse_Pe_R;
double delta_run; //run time
double delta_tumble; //tumble time
double avg_n_tumble; //average tumbling angle in degrees
double std_n_tumble; //std tumbling angle in degrees
load_textfile_sim_parameters( "bacteria_surface_input.txt",
sim_num, case_num,
dt, time_save, start_time, final_time,
N, l, d,
C, L, R,
epsilon_r, sigma_bdy,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R,
delta_run, delta_tumble, avg_n_tumble, std_n_tumble);
L = L * 2.0 * R;
cout.setf(ios::fixed);
cout.setf(ios::showpoint);
cout.precision(15);
cout << endl<<"==============="<<endl
<< "sim_num = " << sim_num << endl
<< "case_num = " << case_num << endl
<< "dt = " << dt << endl
<< "time_save = " << time_save << endl
<< "start_time = " << start_time << endl
<< "final_time = " << final_time << endl
<< "N = " << N << endl
<< "l = " << l << endl
<< "d = " << d << endl
<< "C = " << C << endl
<< "L = " << L << endl
<< "R = " << R << endl
<< "epsilon_r = " << epsilon_r << endl
<< "sigma_bdy = " << sigma_bdy << endl
<< "inverse_Pe_T = " << inverse_Pe_T << endl
<< "inverse_Pe_parallel = " << inverse_Pe_parallel << endl
<< "inverse_Pe_perp = " << inverse_Pe_perp << endl
<< "inverse_Pe_R = " << inverse_Pe_R << endl
<< "delta_run = "<< delta_run << endl
<< "delta_tumble = " << delta_tumble << endl
<< "avg_n_tumble = " << avg_n_tumble << endl
<< "std_n_tumble = " << std_n_tumble << endl
<< "================"<<endl;
cout.precision(15);
print_to_file_input(sim_num, case_num, dt, time_save, start_time, final_time,
N, l, d, C, L, R, epsilon_r, sigma_bdy,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R, delta_run, delta_tumble, avg_n_tumble, std_n_tumble);
//-----End: simulation input-----
//-----Start: declaring derived simulation parameters-----
//simulation variables:
int time_steps = ceil((final_time - start_time) / dt); //number of simulation time steps
int timestep_save; //number of simulation time steps until save output
timestep_save = ceil(time_save / dt);
double4 x[N]; //dimensionless Cartesian coordinates of the bacteria & dimensionless half-length of the bacteria
double4 n[N]; //dimensionless Cartesian vector components of the orientation vector of the bacteria & dimensionless half-diameter of the bacteria
double t_run[N]; //run time of the bacteria
double t_tumble[N]; //tumble time of bacteria
int tumble_flag[N]; //tumble flag of bacteria (if tumble_flag[alpha] = 1, then bacteria tumbles; otherwise it runs)
memset(x, 0, N * sizeof(double4));
memset(n, 0, N * sizeof(double4));
memset(t_run, 0, N * sizeof(double));
memset(t_tumble, 0, N * sizeof(double));
memset(tumble_flag, 0, N * sizeof(int));
//-----End: declaring derived simulation parameters-----
//-----Start: INITIALIZING-----
//-----Start: initial positions, orientations, and run time-----
initial_loading(x, n, N, C, L, l, d, t_run, t_tumble, delta_run, (final_time - start_time));
//-----End: initial positions, orientations, and run time-----
//-----Start: print initial positions and orientations-----
print_to_file_output(sim_num, case_num, 0, N, x, n, t_run);
//-----End: print initial positions and orientations-----
//-----Start: set up cuda variables-----
// calculate number of blocks and threads needed
int num_BLOCKS, num_THREADS;
if (N < THREADS_PER_BLOCK)
{
num_BLOCKS = 1;
num_THREADS = N;
}
else
{
num_BLOCKS = 1 + (N - 1)/THREADS_PER_BLOCK; //ceiling, valid only when N != 0
num_THREADS = THREADS_PER_BLOCK;
}
// declare GPU memory pointers
double4 *d_x;
double4 *d_n;
double3 *d_dUbdy_dxa;
double3 *d_dUbdy_dna;
double *d_t_run;
double *d_t_tumble;
int *d_tumble_flag;
float4 *d_random_numbers_noise;
// allocate GPU memory
hipMalloc((void**) &d_x, N * sizeof(double4));
hipMalloc((void**) &d_n, N * sizeof(double4));
hipMalloc((void**) &d_dUbdy_dxa, N * sizeof(double3));
hipMalloc((void**) &d_dUbdy_dna, N * sizeof(double3));
hipMalloc((void**) &d_t_run, N * sizeof(double));
hipMalloc((void**) &d_t_tumble, N * sizeof(double));
hipMalloc((void**) &d_tumble_flag, N * sizeof(int));
hipMalloc((void **)&d_random_numbers_noise, N * sizeof(float4));
// transfer the array to the GPU
hipMemcpy(d_x, x, N * sizeof(double4), hipMemcpyHostToDevice);
hipMemcpy(d_n, n, N * sizeof(double4), hipMemcpyHostToDevice);
hipMemcpy(d_t_run, t_run, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_t_tumble, t_tumble, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_tumble_flag, tumble_flag, N * sizeof(int), hipMemcpyHostToDevice);
//random number generators:
hiprandState_t *d_CurandStates;
hiprandStatePhilox4_32_10_t *d_PHILOXStates;
hipMalloc((void **) &d_CurandStates, N * sizeof(hiprandState_t));
hipMalloc((void **) &d_PHILOXStates, N * sizeof(hiprandStatePhilox4_32_10_t));
// setup seeds
hipLaunchKernelGGL(( init), dim3(num_BLOCKS), dim3(num_THREADS) , 0, 0, seed, d_CurandStates);
hipLaunchKernelGGL(( init), dim3(num_BLOCKS), dim3(num_THREADS) , 0, 0, seed, d_PHILOXStates);
//-----End: set up cuda variables-----
cout << "End: INITIALIZING" << endl;
//-----End: INITIALIZING-----
//-----Start: DYNAMICS LOOP-----
int itime = floor(start_time / dt) + 1;
cout << "itime: " << itime << endl;
cout << "time_steps: " << time_steps << endl;
while (itime <= time_steps)
{
//-----Start: random numbers -----
hipLaunchKernelGGL(( generate_random_numbers_noise), dim3(num_BLOCKS), dim3(num_THREADS) , 0, 0, d_PHILOXStates, d_random_numbers_noise);
//-----End: random numbers -----
//-----Start: boundary interactions-----
hipLaunchKernelGGL(( calculate_BodyWallInteraction), dim3(num_BLOCKS), dim3(num_THREADS) , 0, 0, d_dUbdy_dxa,
d_dUbdy_dna, d_x, d_n,
sigma_bdy, R, C, N);
//-----End: boundary interactions-----
//-----Start: time-marching-----
hipLaunchKernelGGL(( time_marching), dim3(num_BLOCKS), dim3(num_THREADS) , 0, 0,
d_x, d_n,
d_dUbdy_dxa, d_dUbdy_dna,
epsilon_r,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R,
dt, N, L,
d_t_run, d_t_tumble, d_tumble_flag,
delta_run, delta_tumble,
avg_n_tumble, std_n_tumble,
d_CurandStates, d_random_numbers_noise);
//-----End: time-marching-----
//-----Start: saving variables-----
if ( itime % timestep_save == 0)
{
// copy back the result array to the CPU
hipMemcpy(x, d_x, N * sizeof(double4), hipMemcpyDeviceToHost);
hipMemcpy(n, d_n, N * sizeof(double4), hipMemcpyDeviceToHost);
hipMemcpy(t_run, d_t_run, N * sizeof(double), hipMemcpyDeviceToHost);
print_to_file_output(sim_num, case_num, itime, N, x, n, t_run);
}
//-----End: saving variables-----
printf("\ntime step: %d", itime);
itime++;
}
cout << endl << endl;
//-----End: DYNAMICS LOOP-----
return 0;
}
| 9cb5a0202d519daf9c29dcc49bf2567caa4ad9b9.cu | #include <iostream>
#include <stdio.h>
#include <cmath>
#include <math.h>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <cstring>
#include <string>
#include <algorithm>
#include <random>
#include <numeric>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
//#include <thrust/sort.h>
//#include <thrust/execution_policy.h>
//bacteria surface geometry, UUU boundary semi-circle, Brownian noise
//Last updated: March 10, 2021
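//Build sketch (not from the original source; the file name is a placeholder and the
//exact flags are an assumption): nvcc -O2 bacteria_surface.cu -o bacteria_surface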
using namespace std;
//set random seed for uniform distribution for angles
unsigned int seed = time(NULL);
default_random_engine engine(seed);
#define THREADS_PER_BLOCK 128
#define TILE_SIZE 128
#define PI 3.14159265358979
#define K_B 1.38064852E-23 //m^2 kg s^-2 K^-1
#define DIM 3
//====================================================================================
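//The three geometric factors below are the aspect-ratio-dependent inverse friction
//(mobility) coefficients of a rod-like cell with a = l/d (half-length over
//half-diameter): translation parallel to the axis, translation perpendicular to it,
//and rotation about the center. They weight the orientation projectors assembled in
//the time_marching kernel.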
//Returns the inverse parallel geometric factor for the
// translational friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_parallel_geo_factor(double a)
{
double inverse_parallel = (log(a) - 0.207 + 0.980 / a - 0.133 / (a * a))
* (1.0 / (2.0 * PI * a));
return inverse_parallel;
}
//Returns the inverse perpendicular geometric factor for the
// translational friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_perpendicular_geo_factor(double a)
{
double inverse_perp = (log(a) + 0.839 + 0.185 / a + 0.233 / (a * a))
* ( 1.0 / (4.0 * PI * a));
return inverse_perp;
}
//Returns the rotational geometric factor for the
// rotation friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_rotation_geo_factor(double a)
{
double inverse_rotation = (log(a) - 0.662 + 0.917 / a - 0.050 / (a * a))
* ( 3.0 / (PI * a * a));
return inverse_rotation;
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, curandState *state) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed, id, 0, &state[id]);
}
__global__ void init(unsigned int seed, curandStatePhilox4_32_10_t *state) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed, id, 0, &state[id]);
}
__global__ void generate_random_numbers_noise(curandStatePhilox4_32_10_t *state, float4 *numbers) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Copy state to local memory for efficiency */
curandStatePhilox4_32_10_t localState = state[id];
numbers[id] = curand_normal4(&localState);
/* Copy state back to global memory */
state[id] = localState;
}
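//Surface geometry helpers: ys_fcn returns the wall height ys at horizontal position xs
//for the periodic chain of semi-circular bumps of radius R, offset by +/-C for the top
//or bottom wall (the sign follows the side the bacterium is on). dysdxs_fcn and
//d2ysdxs2_fcn are the first and second derivatives of that profile; func/dfunc_dx give
//the stationarity condition (and its derivative) whose root, located by bisection in
//PointOnSurface, is the surface point closest to the bacterium centroid.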
__device__ double ys_fcn(double4 xa, double xs, double R, double C)
{
double ys;
double param = (2.0 * R / PI) * acos(cos(PI * xs / (2.0 * R))) - R;
if (xa.y > 0.0)
{
ys = sqrt(R * R - param * param) + C;
}
else
{
ys = -sqrt(R * R - param * param) - C;
}
return ys;
}
__device__ double dysdxs_fcn(double xs, double ys, double R)
{
double dysdxs;
double numer = R * (PI - 2.0 * acos(cos(PI * xs / (2.0 * R)))) * sin(PI * xs / (2.0 * R));
double denom = sqrt(2.0) * sqrt(R * R * (PI - acos(cos(PI * xs / (2.0 * R)))) * acos(cos(PI * xs / (2.0 * R)))) *
sqrt(1.0 - cos(PI * xs / R));
if (ys > 0)
{
dysdxs = numer / denom;
}
else
{
dysdxs = -numer / denom;
}
return dysdxs;
}
__device__ double d2ysdxs2_fcn(double xs, double ys, double R)
{
double d2ysdxs2;
double numer = PI * PI * PI * R * R;
double denom_param1 = (asin(cos(PI * xs / (2.0 * R)))) * (asin(cos(PI * xs / (2.0 * R))));
double denom_param2 = sqrt(R * R * (PI * PI - 4.0 * denom_param1));
double denom = denom_param2 * denom_param2 * denom_param2;
if (ys > 0)
{
d2ysdxs2 = -numer / denom;
}
else
{
d2ysdxs2 = numer / denom;
}
return d2ysdxs2;
}
__device__ double func(double4 xa, double xs, double R, double C)
{
double ys = ys_fcn(xa, xs, R, C);
double dysdxs = dysdxs_fcn(xs, ys, R);
double func_value = -xa.x + xs + (ys - xa.y) * dysdxs;
return func_value;
}
__device__ double dfunc_dx(double4 xa, double xs, double R, double C)
{
double ys = ys_fcn(xa, xs, R, C);
double dysdxs = dysdxs_fcn(xs, ys, R);
double d2ysdxs2 = d2ysdxs2_fcn(xs, ys, R);
double dfunc_dx_value = 1.0 + dysdxs * dysdxs + (ys - xa.y) * d2ysdxs2;
return dfunc_dx_value;
}
__device__ double3 SurfaceNormal(double xs, double ys, double R)
{
double dysdxs = dysdxs_fcn( xs, ys, R );
double denom = sqrt(dysdxs * dysdxs + 1.0);
double dysplusdxs = dysdxs_fcn( xs, 1.0, R );
double3 N;
N.x = dysplusdxs / denom;
N.z = 0.0;
if (ys > 0.0)
{
N.y = -1.0 / denom;
}
else
{
N.y = 1.0 / denom;
}
return N;
}
__device__ double bisection(double xl, double xu, double4 xa, double R, double C)
{
double es = 0.5; //percent
int imax = 20;
double fl = func(xa, xl, R, C);
double fu = func(xa, xu, R, C);
double xs;
if (fl * fu < 0.0)
{
double ea = 1.0;
int iter = 0;
double xr = xl;
double test, fr, xrold;
while (iter < imax && ea > es)
{
xrold = xr;
xr = (xl + xu) / 2.0;
fr = func(xa, xr, R, C);
test = fl * fr;
ea = abs((xr - xrold) / xr) * 100.0;
if (test < 0.0)
{
xu = xr;
}
else
{
xl = xr;
fl = fr;
}
iter++;
}
xs = xr;
}
else
{
xs = sqrt(-1.0);
}
return xs;
}
__device__ bool isNaN(double s)
{
// http.developer.nvidia.com/Cg/isnan.html
return s != s;
}
__device__ double3 PointOnSurface(double4 xa, double R, double C)
{
double xlower_bound, xupper_bound;
double xl1, xu1, xs1, xl2, xu2, xs2, chck1, chck2, xs, ys;
double3 S;
//bounds are always set to be within the semi-circle the point is in
double x_star = fmod(xa.x, (2.0 * R));
xlower_bound = xa.x - 0.99 * x_star;
xupper_bound = xa.x + 0.99 * ((2.0 * R) - x_star);
xl1 = xlower_bound;
xu1 = xa.x;
xl2 = xa.x;
xu2 = xupper_bound;
//bisection on each section:
xs1 = bisection(xl1, xu1, xa, R, C);
xs2 = bisection(xl2, xu2, xa, R, C);
//check roots
if (isNaN(xs1) == 1)
{
chck1 = -1;
}
else
{
chck1 = dfunc_dx(xa, xs1, R, C);
}
if (isNaN(xs2) == 1)
{
chck2 = -1;
}
else
{
chck2 = dfunc_dx(xa, xs2, R, C);
}
if (chck1 > 0)
{
xs = xs1;
}
else if (chck2 > 0)
{
xs = xs2;
}
else
{
xs = xa.x;
}
ys = ys_fcn(xa, xs, R, C);
S.x = xs;
S.y = ys;
S.z = 0.0;
return S;
}
__device__ double2 dxsys_dxa(double3 S, double R, int i)
{
double2 dxsysdxa;
double dysdxs = dysdxs_fcn(S.x, S.y, R);
if (i == 1)
{
dxsysdxa.x = 1.0;
dxsysdxa.y = 1.0 / dysdxs;
}
else if (i == 2)
{
dxsysdxa.x = dysdxs;
dxsysdxa.y = 1.0;
}
return dxsysdxa;
}
__device__ double2 dN_dxs(double xs, double ys, double R)
{
double2 dNdxs;
dNdxs.x = - 1.0 / R;
double numer1 = sqrt(1.0 / (PI * PI - 4.0 * (asin(cos(PI * xs / (2.0 * R)))) * (asin(cos(PI * xs / (2.0 * R)))) ));
double numer = 4.0 * asin(cos(PI * xs / (2.0 * R))) * numer1 * sin(PI * xs / (2.0 * R));
double denom = R * sqrt(2.0 - 2.0 * cos(PI * xs / R));
dNdxs.y = - numer / denom;
if (ys < 0.0)
{
dNdxs.y = numer / denom;
}
return dNdxs;
}
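//calculate_BodyWallInteraction: for each bacterium, evaluate the overlap measure
//r_alpha = la*|n.N| + da - (x - S).N against the nearest wall point S with unit normal
//N and, when r_alpha > 0, the gradients of the steric wall repulsion
//U_bdy = exp(r_alpha / sigma_bdy) with respect to the centroid (d_dUbdy_dxa) and the
//orientation (d_dUbdy_dna). A nearly vertical bacterium close to a bump peak is instead
//treated against a flat wall, with the resulting force scaled down by 0.01.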
__global__ void calculate_BodyWallInteraction(double3 *d_dUbdy_dxa,
double3 *d_dUbdy_dna, double4 *d_x, double4 *d_n,
double sigma_bdy, double R, double C, int N)
{
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < N)
{
double3 dUbdy_dxa, dUbdy_dna;
dUbdy_dxa.x = 0.0;
dUbdy_dxa.y = 0.0;
dUbdy_dxa.z = 0.0;
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
double4 xa = d_x[gtid];
double4 na = d_n[gtid];
double la = xa.w;
double da = na.w;
double y = ys_fcn(xa, xa.x, R, C);
double chk1 = abs(xa.y);
double chk2 = abs(y) - (la + da);
if (chk1 > chk2)
{
double tol = 1.0 * da; //tolerance of one half-diameter
double x_star = fmod(xa.x, (2.0 * R));
if (x_star <= tol || ((2.0 * R) - x_star) <= tol) //bacteria is near peak
{
if (abs(1.0 - abs(na.y)) < 0.2) //bacteria near peak and vertical => treat like flat boundary
{
double3 S, W_hat;
if (xa.y <= 0.0) //bottom surface
{
W_hat.x = 0.0;
W_hat.y = 1.0;
W_hat.z = 0.0;
S.x = xa.x;
S.y = -abs(C);
S.z = 0.0;
}
else // top surface
{
W_hat.x = 0.0;
W_hat.y = -1.0;
W_hat.z = 0.0;
S.x = xa.x;
S.y = abs(C);
S.z = 0.0;
}
double dot_na_W_hat, dot_xa_W_hat, dot_W_hat_S, r_alpha;
dot_na_W_hat = na.x * W_hat.x + na.y * W_hat.y + na.z * W_hat.z;
dot_xa_W_hat = xa.x * W_hat.x + xa.y * W_hat.y + xa.z * W_hat.z;
dot_W_hat_S = W_hat.x * S.x + W_hat.y * S.y + W_hat.z * S.z;
r_alpha = la * abs(dot_na_W_hat) + da - dot_xa_W_hat + dot_W_hat_S;
double dUbdy_dralpha;
double3 dralpha_dna;
if (r_alpha > 0.0) //contact with boundary
{
dUbdy_dralpha = 0.01 * (1.0 / sigma_bdy) * exp(r_alpha / sigma_bdy);
//0.01 factor to reduce the effect of the flat boundary
//boundary force derivatives:
dUbdy_dxa.x = dUbdy_dralpha * -W_hat.x;
dUbdy_dxa.y = dUbdy_dralpha * -W_hat.y;
dUbdy_dxa.z = dUbdy_dralpha * -W_hat.z;
//boundary orientation derivatives:
if (dot_na_W_hat == 0.0)
{
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
}
else
{
dralpha_dna.x = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.x;
dralpha_dna.y = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.y;
dralpha_dna.z = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.z;
dUbdy_dna.x = dUbdy_dralpha * dralpha_dna.x;
dUbdy_dna.y = dUbdy_dralpha * dralpha_dna.y;
dUbdy_dna.z = dUbdy_dralpha * dralpha_dna.z;
}
}
}
}
else
{
double3 S, Nhat; // point on the surface closest to bacteria
S = PointOnSurface(xa, R, C);
Nhat = SurfaceNormal(S.x, S.y, R);
double3 xa_S;
xa_S.x = xa.x - S.x;
xa_S.y = xa.y - S.y;
xa_S.z = xa.z - S.z;
double dot_na_Nhat, dot_Nhat_xa_S, r_alpha;
dot_na_Nhat = na.x * Nhat.x + na.y * Nhat.y + na.z * Nhat.z;
dot_Nhat_xa_S = Nhat.x * xa_S.x + Nhat.y * xa_S.y + Nhat.z * xa_S.z;
r_alpha = la * abs(dot_na_Nhat) + da - dot_Nhat_xa_S;
if (r_alpha > 0.0) //contact with boundary
{
double dUbdy_dralpha;
double3 dralpha_dna, dralpha_dxa;
dUbdy_dralpha = (1.0 / sigma_bdy) * exp(r_alpha / sigma_bdy);
double2 dNdxs = dN_dxs(S.x, S.y, R);
double2 dxys_dxa1 = dxsys_dxa(S, R, 1);
double2 dxys_dxa2 = dxsys_dxa(S, R, 2);
double dysdxs = dysdxs_fcn(S.x, S.y, R);
double dxsdys = 1.0 / dysdxs;
double c1 = la * dot_na_Nhat / abs(dot_na_Nhat);
double c2 = (xa.x - S.x) * dNdxs.x;
double c3 = (xa.y - S.y) * dNdxs.y;
dralpha_dxa.x = c1 * (na.x * dNdxs.x + na.y * dNdxs.y) * dxys_dxa1.x
- (c2 + c3) * dxys_dxa1.x
- Nhat.x
+ (Nhat.x + Nhat.y * dysdxs) * dxys_dxa1.x
+ (Nhat.x * dxsdys + Nhat.y) * dxys_dxa1.y;
dralpha_dxa.y = c1 * (na.x * dNdxs.x + na.y * dNdxs.y) * dxys_dxa2.x
- (c2 + c3) * dxys_dxa2.x
- Nhat.y
+ (Nhat.x + Nhat.y * dysdxs) * dxys_dxa2.x
+ (Nhat.x * dxsdys + Nhat.y) * dxys_dxa2.y;
dralpha_dxa.z = 0.0;
//boundary force derivatives:
dUbdy_dxa.x = dUbdy_dralpha * dralpha_dxa.x;
dUbdy_dxa.y = dUbdy_dralpha * dralpha_dxa.y;
dUbdy_dxa.z = dUbdy_dralpha * dralpha_dxa.z;
//boundary orientation derivatives:
if (dot_na_Nhat == 0.0)
{
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
}
else
{
dralpha_dna.x = c1 * Nhat.x;
dralpha_dna.y = c1 * Nhat.y;
dralpha_dna.z = c1 * Nhat.z;
dUbdy_dna.x = dUbdy_dralpha * dralpha_dna.x;
dUbdy_dna.y = dUbdy_dralpha * dralpha_dna.y;
dUbdy_dna.z = dUbdy_dralpha * dralpha_dna.z;
}
}
}
}
// Save the result in global memory for the integration step
d_dUbdy_dxa[gtid] = dUbdy_dxa;
d_dUbdy_dna[gtid] = dUbdy_dna;
}
}
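//time_marching: explicit Euler-Maruyama update with run-and-tumble switching. A
//tumbling cell is held in place for delta_tumble and then rotated in-plane by a
//Gaussian angle (mean avg_n_tumble, std std_n_tumble, random sign). A running cell
//advances by self-propulsion n*dt plus the wall force mapped through the anisotropic
//mobility Gamma_inverse plus translational noise; its orientation relaxes through the
//projector (I - nn) with rotational noise and is re-normalized. Finally x is wrapped
//periodically onto [0, L].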
__global__ void time_marching(double4 *d_x, double4 *d_n,
double3 *d_dUbdy_dxa, double3 *d_dUbdy_dna,
double epsilon_r,
double inverse_Pe_T, double inverse_Pe_parallel, double inverse_Pe_perp, double inverse_Pe_R,
double dt, int N, double L,
double *d_t_run, double *d_t_tumble, int *d_tumble_flag,
double delta_run, double delta_tumble,
double avg_n_tumble, double std_n_tumble,
curandState *state, float4 *d_random_numbers_noise)
{
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < N)
{
double4 xa = d_x[gtid];
double4 na = d_n[gtid];
float4 random_numbers_noise = d_random_numbers_noise[gtid];
double la = xa.w;
double da = na.w;
double3 dUbdy_dxa = d_dUbdy_dxa[gtid];
double3 dUbdy_dna = d_dUbdy_dna[gtid];
double4 x_new;
double4 n_new;
//-----Start: creating orientation and orientation projection matrix-----
double ori_matrix[DIM][DIM];
ori_matrix[0][0] = na.x * na.x;
ori_matrix[1][1] = na.y * na.y;
ori_matrix[2][2] = na.z * na.z;
ori_matrix[0][1] = na.x * na.y;
ori_matrix[0][2] = na.x * na.z;
ori_matrix[1][2] = na.y * na.z;
ori_matrix[1][0] = ori_matrix[0][1];
ori_matrix[2][0] = ori_matrix[0][2];
ori_matrix[2][1] = ori_matrix[1][2];
double ori_proj_matrix[DIM][DIM];
ori_proj_matrix[0][0] = 1.0 - na.x * na.x;
ori_proj_matrix[1][1] = 1.0 - na.y * na.y;
ori_proj_matrix[2][2] = 1.0 - na.z * na.z;
ori_proj_matrix[0][1] = 0.0 - na.x * na.y;
ori_proj_matrix[0][2] = 0.0 - na.x * na.z;
ori_proj_matrix[1][2] = 0.0 - na.y * na.z;
ori_proj_matrix[1][0] = ori_proj_matrix[0][1];
ori_proj_matrix[2][0] = ori_proj_matrix[0][2];
ori_proj_matrix[2][1] = ori_proj_matrix[1][2];
//-----End: creating orientation and orientation projection matrix-----
//-----Start: time-marching + tumbling dynamics-----
if (d_tumble_flag[gtid] == 1) //tumbling
{
d_t_tumble[gtid] += dt;
if (d_t_tumble[gtid] < delta_tumble) //don't move
{
x_new.x = xa.x;
x_new.y = xa.y;
x_new.z = xa.z;
x_new.w = xa.w;
n_new.x = na.x;
n_new.y = na.y;
n_new.z = na.z;
n_new.w = na.w;
}
else //tumble
{
d_tumble_flag[gtid] = 0;
d_t_tumble[gtid] = 0.0;
float angle;
double rad_angle;
curandState localState = state[gtid];
angle = curand_normal(&localState);
angle = angle * std_n_tumble + avg_n_tumble;
while (angle < 0.0 || angle > 180.0)
{
angle = curand_normal(&localState);
angle = angle * std_n_tumble + avg_n_tumble;
}
double uniform1 = curand_uniform(&localState); //number between 0 and 1
if (uniform1 < 0.5) //otherwise angle is positive
{
angle = -angle;
}
state[gtid] = localState;
rad_angle = angle * PI / 180; //convert to radians
//rotation matrix
double R[2][2];
R[0][0] = cos(rad_angle);
R[0][1] = -sin(rad_angle);
R[1][0] = sin(rad_angle);
R[1][1] = cos(rad_angle);
n_new.x = R[0][0] * na.x + R[0][1] * na.y;
n_new.y = R[1][0] * na.x + R[1][1] * na.y;
n_new.z = 0.0;
n_new.w = da;
x_new.x = xa.x;
x_new.y = xa.y;
x_new.z = xa.z;
x_new.w = xa.w;
}
}
else //run
{
d_t_run[gtid] += dt;
if (d_t_run[gtid] < delta_run) //run
{
//translational dynamics:
//calculating geometric factors:
double aspect = la/da;
double inverse_parallel = inverse_parallel_geo_factor(aspect);
double inverse_perp = inverse_perpendicular_geo_factor(aspect);
double inverse_rotation = inverse_rotation_geo_factor(aspect);
//-----Start: creating Gamma_inverse matrix-----
double Gamma_inverse[DIM][DIM];
for(int i = 0; i < DIM; i++)
{
for(int j = 0; j < DIM; j++)
{
Gamma_inverse[i][j] = inverse_parallel * ori_matrix[i][j]
+ inverse_perp * ori_proj_matrix[i][j];
}
}
//-----End: creating Gamma_inverse matrix-----
//-----Start: creating translational diffusion matrix-----
double Pe_trans_matrix[DIM][DIM];
double sqrt_Pe_inverse_parallel = sqrt(inverse_Pe_parallel);
double sqrt_Pe_inverse_perp = sqrt(inverse_Pe_perp);
for(int i = 0; i < DIM; i++)
{
for(int j = 0; j < DIM; j++)
{
Pe_trans_matrix[i][j] = sqrt_Pe_inverse_parallel * ori_matrix[i][j]
+ sqrt_Pe_inverse_perp * ori_proj_matrix[i][j];
}
}
//-----End: creating translational diffusion matrix-----
//adding it all together:
double3 x_b;
x_b.x = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.x;
x_b.y = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.y;
x_b.z = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.z;
//matrix multiply:
double3 Gamma_inverse_x_b;
Gamma_inverse_x_b.x = Gamma_inverse[0][0] * x_b.x
+ Gamma_inverse[0][1] * x_b.y
+ Gamma_inverse[0][2] * x_b.z;
Gamma_inverse_x_b.y = Gamma_inverse[1][0] * x_b.x
+ Gamma_inverse[1][1] * x_b.y
+ Gamma_inverse[1][2] * x_b.z;
Gamma_inverse_x_b.z = Gamma_inverse[2][0] * x_b.x
+ Gamma_inverse[2][1] * x_b.y
+ Gamma_inverse[2][2] * x_b.z;
//noise:
float3 d_xi;
d_xi.x = random_numbers_noise.x * sqrt(2.0 * dt);
d_xi.y = random_numbers_noise.y * sqrt(2.0 * dt);
d_xi.z = 0.0;
float3 trans_noise;
trans_noise.x = Pe_trans_matrix[0][0] * d_xi.x
+ Pe_trans_matrix[0][1] * d_xi.y
+ Pe_trans_matrix[0][2] * d_xi.z;
trans_noise.y = Pe_trans_matrix[1][0] * d_xi.x
+ Pe_trans_matrix[1][1] * d_xi.y
+ Pe_trans_matrix[1][2] * d_xi.z;
trans_noise.z = Pe_trans_matrix[2][0] * d_xi.x
+ Pe_trans_matrix[2][1] * d_xi.y
+ Pe_trans_matrix[2][2] * d_xi.z;
//time step:
x_new.x = xa.x + na.x * dt + Gamma_inverse_x_b.x * dt + trans_noise.x;
x_new.y = xa.y + na.y * dt + Gamma_inverse_x_b.y * dt + trans_noise.y;
x_new.z = 0.0;
x_new.w = la;
//orientation dynamics
double3 n_b;
int dim = 2;
n_b.x = - epsilon_r * (inverse_Pe_R) * inverse_rotation * dUbdy_dna.x + (1 - dim) * (inverse_Pe_R) * na.x;
n_b.y = - epsilon_r * (inverse_Pe_R) * inverse_rotation * dUbdy_dna.y + (1 - dim) * (inverse_Pe_R) * na.y;
n_b.z = - epsilon_r * (inverse_Pe_R) * inverse_rotation * dUbdy_dna.z + (1 - dim) * (inverse_Pe_R) * na.z;
double3 ori_proj_n_b;
ori_proj_n_b.x = ori_proj_matrix[0][0] * n_b.x
+ ori_proj_matrix[0][1] * n_b.y
+ ori_proj_matrix[0][2] * n_b.z;
ori_proj_n_b.y = ori_proj_matrix[1][0] * n_b.x
+ ori_proj_matrix[1][1] * n_b.y
+ ori_proj_matrix[1][2] * n_b.z;
ori_proj_n_b.z = ori_proj_matrix[2][0] * n_b.x
+ ori_proj_matrix[2][1] * n_b.y
+ ori_proj_matrix[2][2] * n_b.z;
//noise:
float3 d_zeta;
d_zeta.x = random_numbers_noise.z * sqrt(2.0 * (inverse_Pe_R) * dt);
d_zeta.y = random_numbers_noise.w * sqrt(2.0 * (inverse_Pe_R) * dt);
d_zeta.z = 0.0;
double3 ori_noise;
ori_noise.x = ori_proj_matrix[0][0] * d_zeta.x
+ ori_proj_matrix[0][1] * d_zeta.y
+ ori_proj_matrix[0][2] * d_zeta.z;
ori_noise.y = ori_proj_matrix[1][0] * d_zeta.x
+ ori_proj_matrix[1][1] * d_zeta.y
+ ori_proj_matrix[1][2] * d_zeta.z;
ori_noise.z = ori_proj_matrix[2][0] * d_zeta.x
+ ori_proj_matrix[2][1] * d_zeta.y
+ ori_proj_matrix[2][2] * d_zeta.z;
n_new.x = na.x + ori_proj_n_b.x * dt + ori_noise.x;
n_new.y = na.y + ori_proj_n_b.y * dt + ori_noise.y;
n_new.z = 0.0;
n_new.w = da;
}
else
{
d_tumble_flag[gtid] = 1;
d_t_run[gtid] = 0.0;
x_new.x = xa.x;
x_new.y = xa.y;
x_new.z = xa.z;
x_new.w = xa.w;
n_new.x = na.x;
n_new.y = na.y;
n_new.z = na.z;
n_new.w = na.w;
}
}
//-----End: time-marching + tumbling dynamics-----
//normalize n afterwards:
double magn_n_new_Sqrd = n_new.x * n_new.x + n_new.y * n_new.y + n_new.z * n_new.z;
double magn_n_new = sqrt(magn_n_new_Sqrd);
n_new.x = (n_new.x / magn_n_new);
n_new.y = (n_new.y / magn_n_new);
n_new.z = (n_new.z / magn_n_new);
//periodic BC
if (x_new.x < 0.0)
{
x_new.x = L + x_new.x; //x_new.x is negative here, so this wraps to L - |x_new.x|
}
else if (x_new.x > L)
{
double delta_x = x_new.x - L;
x_new.x = delta_x;
}
// Save the result in global memory
d_x[gtid] = x_new;
d_n[gtid] = n_new;
}
}
//returns the greatest common divisor of two numbers
int gcd(int first_number, int second_number)
{
int gcd_value;
for(int i = 1; i <= first_number && i <= second_number; i++)
{
if(first_number % i == 0 && second_number % i == 0 )
{
gcd_value = i;
}
}
return gcd_value;
}
//loads the .txt file that contains the simulation input variables data
void load_textfile_sim_parameters( char filename[],
int& sim_num, int& case_num,
double& dt, double& time_save, double& start_time, double& final_time,
int& N, double& l, double& d,
double& C, double& L, double& R,
double& epsilon_r, double& sigma_bdy,
double& inverse_Pe_T, double& inverse_Pe_parallel, double& inverse_Pe_perp, double& inverse_Pe_R,
double& delta_run, double& delta_tumble, double& avg_n_tumble, double& std_n_tumble)
{
ifstream infile(filename);
if (infile.fail())
{
cout<<"\nSimulation parameters input file opening failed.\n";
exit(1);
}
const int number_inputs = 22;
double input_vec[number_inputs];
for (int i = 0; i < number_inputs; i++)
{
infile >> input_vec[i];
}
int i = 0;
sim_num = int(input_vec[i]);
case_num = int(input_vec[++i]);
dt = input_vec[++i];
time_save = input_vec[++i];
start_time = input_vec[++i];
final_time = input_vec[++i];
N = int(input_vec[++i]);
l = input_vec[++i];
d = input_vec[++i];
C = input_vec[++i];
L = input_vec[++i];
R = input_vec[++i];
epsilon_r = input_vec[++i];
sigma_bdy = input_vec[++i];
inverse_Pe_T = input_vec[++i];
inverse_Pe_parallel = input_vec[++i];
inverse_Pe_perp = input_vec[++i];
inverse_Pe_R = input_vec[++i];
delta_run = input_vec[++i];
delta_tumble = input_vec[++i];
avg_n_tumble = input_vec[++i];
std_n_tumble = input_vec[++i];
cout << "\nSimulation parameters loaded\n";
}
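//Expected layout of bacteria_surface_input.txt: the 22 values read above, whitespace
//separated, in exactly this order:
// sim_num case_num dt time_save start_time final_time N l d C L R epsilon_r sigma_bdy
// inverse_Pe_T inverse_Pe_parallel inverse_Pe_perp inverse_Pe_R delta_run delta_tumble
// avg_n_tumble std_n_tumble
//Illustrative example (values are placeholders, not taken from the original source):
// 1 1 1e-4 0.1 0.0 10.0 128 2.0 0.5 10.0 4.0 5.0 1.0 0.1 0.01 0.01 0.01 0.1 1.0 0.1 68.0 36.0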
void initial_loading(double4 x[], double4 n[], int N, double C, double L,
double l, double d, double t_run[], double t_tumble[], double delta_run, double simulation_time)
{
double factorL = 1.0;
double factorLminus1 = 1.0 - factorL;
double xmin = 0.0 + 0.5 * factorLminus1 * L;
double xmax = L - 0.5 * factorLminus1 * L;
double ymin = -C;
double ymax = C;
uniform_real_distribution<double> uniform_x(xmin, xmax);
uniform_real_distribution<double> uniform_y(ymin, ymax);
uniform_real_distribution<double> uniform_dist_angle(0, 2.0 * PI );
uniform_real_distribution<double> uniform_dist_run_time(0.0, delta_run);
double angle;
for(int alpha = 0; alpha < N; alpha++)
{
//set bacteria dimensions:
x[alpha].w = l;
n[alpha].w = d;
//set initial positions
x[alpha].x = uniform_x(engine);
x[alpha].y = uniform_y(engine);
x[alpha].z = 0.0;
//set initial bacteria orientations:
angle = uniform_dist_angle(engine);
n[alpha].x = cos(angle);
n[alpha].y = sin(angle);
n[alpha].z = 0.0;
//set initial run time
if (delta_run < simulation_time) {
t_run[alpha] = uniform_dist_run_time(engine);
}
else {
t_run[alpha] = 0.0;
}
//set initial tumble time
t_tumble[alpha] = 0.0;
}
return;
}
//Returns the eigenvectors corresponding to the orientation vectors for
// all the bacteria.
void eigenvectors_ellipsoid(double eigenvectors[][DIM*DIM], double4 n[], int N)
{
for (int alpha = 0; alpha < N; alpha++)
{
if (n[alpha].x == 1.0)
{
//v1:
eigenvectors[alpha][0] = 1.0;
eigenvectors[alpha][1] = 0.0;
eigenvectors[alpha][2] = 0.0;
//v2:
eigenvectors[alpha][3] = 0.0;
eigenvectors[alpha][4] = 1.0;
eigenvectors[alpha][5] = 0.0;
//v3:
eigenvectors[alpha][6] = 0.0;
eigenvectors[alpha][7] = 0.0;
eigenvectors[alpha][8] = 1.0;
}
else if (n[alpha].x == -1.0)
{
//v1:
eigenvectors[alpha][0] = -1.0;
eigenvectors[alpha][1] = 0.0;
eigenvectors[alpha][2] = 0.0;
//v2:
eigenvectors[alpha][3] = 0.0;
eigenvectors[alpha][4] = -1.0;
eigenvectors[alpha][5] = 0.0;
//v3:
eigenvectors[alpha][6] = 0.0;
eigenvectors[alpha][7] = 0.0;
eigenvectors[alpha][8] = 1.0;
}
else
{
//v1:
eigenvectors[alpha][0] = n[alpha].x;
eigenvectors[alpha][1] = n[alpha].y;
eigenvectors[alpha][2] = n[alpha].z;
double denom = sqrt(1.0 - n[alpha].x * n[alpha].x );
//v2:
eigenvectors[alpha][3] = 0.0;
eigenvectors[alpha][4] = -n[alpha].z / denom;
eigenvectors[alpha][5] = n[alpha].y / denom;
//v3:
eigenvectors[alpha][6] = 1.0 - n[alpha].x * n[alpha].x;
eigenvectors[alpha][7] = -(n[alpha].x * n[alpha].y) / denom;
eigenvectors[alpha][8] = -(n[alpha].x * n[alpha].z) / denom;
}
}
return;
}
//Prints simulation input to file
void print_to_file_input(
int sim_num, int case_num,
double dt, double time_save, double start_time, double final_time,
int N, double l, double d,
double C, double L, double R,
double epsilon_r, double sigma_bdy,
double inverse_Pe_T, double inverse_Pe_parallel, double inverse_Pe_perp, double inverse_Pe_R,
double delta_run, double delta_tumble, double avg_n_tumble, double std_n_tumble)
{
ofstream fout;
char file_name2[100];
sprintf(file_name2,"SimulationInput.txt");
fout.open(file_name2);
if (fout.fail())
{
cout<<"Output file opening failed.\n";
exit(1);
}
fout.setf(ios::fixed);
fout.setf(ios::showpoint);
fout.precision(30);
string headers("sim_num, case_num, dt, time_save, start_time, final_time, N, l, d, C, L, R, epsilon_r, sigma_bdy, inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R, delta_run, delta_tumble, avg_n_tumble, std_n_tumble");
fout << headers << endl;
fout << sim_num << ", "
<< case_num << ", "
<< dt << ", "
<< time_save << ", "
<< start_time << ", "
<< final_time << ", "
<< N << ", "
<< l << ", "
<< d << ", "
<< C << ", "
<< L << ", "
<< R << ", "
<< epsilon_r << ", "
<< sigma_bdy << ", "
<< inverse_Pe_T << ", "
<< inverse_Pe_parallel << ", "
<< inverse_Pe_perp << ", "
<< inverse_Pe_R << ", "
<< delta_run << ", "
<< delta_tumble << ", "
<< avg_n_tumble << ", "
<< std_n_tumble << endl;
fout.close();
return;
}
//Prints output to file
void print_to_file_output(int sim_num, int case_num, int itime, int N,
double4 x[], double4 n[], double t_run[])
{
double eig_vec[N][DIM * DIM]; //dimensionless Cartesian vector components of the eigenvectors for the orientation of the bacteria
eigenvectors_ellipsoid(eig_vec, n, N);
ofstream fout;
char file_name2[100];
sprintf(file_name2,"sim%d_case%d_timestep%015d.txt", sim_num, case_num, itime);
fout.open(file_name2);
if (fout.fail())
{
cout<<"Output file opening failed.\n";
exit(1);
}
fout.setf(ios::fixed);
fout.setf(ios::showpoint);
fout.precision(15);
string headers("Centroid_1, Centroid_2, Centroid_3, DirVector1_1, DirVector1_2, DirVector1_3, DirVector2_1, DirVector2_2, DirVector2_3, DirVector3_1, DirVector3_2, DirVector3_3, SemiAxis1, SemiAxis2, SemiAxis3, tRun");
fout << headers << endl;
for (int alpha = 0; alpha < N; alpha++)
{
fout << x[alpha].x << ", "
<< x[alpha].y << ", "
<< x[alpha].z << ", ";
for (int nCol = 0; nCol < DIM*DIM; nCol++)
{
fout << eig_vec[alpha][nCol] << ", ";
}
fout << x[alpha].w << ", "
<< n[alpha].w << ", "
<< n[alpha].w << ", "
<< t_run[alpha] << endl;
}
fout.close();
return;
}
//====================================================================================
int main(void)
{
//-----Start: simulation input-----
int sim_num; //simulation number
int case_num; //case number
double dt; //dimensionless time step
double time_save; //dimensionless time at which to output
double start_time; //dimensionless start time of simulation
double final_time; //dimensionless final time of simulation
int N; //number of bacteria in simulation
double l; //half-length of bacteria
double d; //half-diameter of bacteria
double C; //wall surface displacement from origin
double L; //wall length (a multiple of lambda)
double R;
double epsilon_r;
double sigma_bdy; //range parameter for bacteria-wall steric repulsion
double inverse_Pe_T;
double inverse_Pe_parallel;
double inverse_Pe_perp;
double inverse_Pe_R;
double delta_run; //run time
double delta_tumble; //tumble time
double avg_n_tumble; //average tumbling angle in degrees
double std_n_tumble; //std tumbling angle in degrees
load_textfile_sim_parameters( "bacteria_surface_input.txt",
sim_num, case_num,
dt, time_save, start_time, final_time,
N, l, d,
C, L, R,
epsilon_r, sigma_bdy,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R,
delta_run, delta_tumble, avg_n_tumble, std_n_tumble);
L = L * 2.0 * R; //rescale L: the input value is given as a multiple of the wavelength, which here corresponds to 2*R
cout.setf(ios::fixed);
cout.setf(ios::showpoint);
cout.precision(15);
cout << endl<<"==============="<<endl
<< "sim_num = " << sim_num << endl
<< "case_num = " << case_num << endl
<< "dt = " << dt << endl
<< "time_save = " << time_save << endl
<< "start_time = " << start_time << endl
<< "final_time = " << final_time << endl
<< "N = " << N << endl
<< "l = " << l << endl
<< "d = " << d << endl
<< "C = " << C << endl
<< "L = " << L << endl
<< "R = " << R << endl
<< "epsilon_r = " << epsilon_r << endl
<< "sigma_bdy = " << sigma_bdy << endl
<< "inverse_Pe_T = " << inverse_Pe_T << endl
<< "inverse_Pe_parallel = " << inverse_Pe_parallel << endl
<< "inverse_Pe_perp = " << inverse_Pe_perp << endl
<< "inverse_Pe_R = " << inverse_Pe_R << endl
<< "delta_run = "<< delta_run << endl
<< "delta_tumble = " << delta_tumble << endl
<< "avg_n_tumble = " << avg_n_tumble << endl
<< "std_n_tumble = " << std_n_tumble << endl
<< "================"<<endl;
cout.precision(15);
print_to_file_input(sim_num, case_num, dt, time_save, start_time, final_time,
N, l, d, C, L, R, epsilon_r, sigma_bdy,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R, delta_run, delta_tumble, avg_n_tumble, std_n_tumble);
//-----End: simulation input-----
//-----Start: declaring derived simulation parameters-----
//simulation variables:
int time_steps = ceil((final_time - start_time) / dt); //number of simulation time steps
int timestep_save; //number of simulation time steps until save output
timestep_save = ceil(time_save / dt);
double4 x[N]; //dimensionless Cartesian coordinates of the bacteria & dimensionless half-length of the bacteria
double4 n[N]; //dimensionless Cartesian vector components of the orientation vector of the bacteria & dimensionless half-diameter of the bacteria
double t_run[N]; //run time of the bacteria
double t_tumble[N]; //tumble time of bacteria
int tumble_flag[N]; //tumble flag of bacteria (if tumble_flag[alpha] = 1, then bacteria tumbles; otherwise it runs)
memset(x, 0, N * sizeof(double4));
memset(n, 0, N * sizeof(double4));
memset(t_run, 0, N * sizeof(double));
memset(t_tumble, 0, N * sizeof(double));
memset(tumble_flag, 0, N * sizeof(int));
//-----End: declaring derived simulation parameters-----
//-----Start: INITIALIZING-----
//-----Start: initial positions, orientations, and run time-----
initial_loading(x, n, N, C, L, l, d, t_run, t_tumble, delta_run, (final_time - start_time));
//-----End: initial positions, orientations, and run time-----
//-----Start: print initial positions and orientations-----
print_to_file_output(sim_num, case_num, 0, N, x, n, t_run);
//-----End: print initial positions and orientations-----
//-----Start: set up cuda variables-----
// calculate number of blocks and threads needed
int num_BLOCKS, num_THREADS;
if (N < THREADS_PER_BLOCK)
{
num_BLOCKS = 1;
num_THREADS = N;
}
else
{
num_BLOCKS = 1 + (N - 1)/THREADS_PER_BLOCK; //ceiling, use only if h_N != 0
num_THREADS = THREADS_PER_BLOCK;
}
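//Example with hypothetical numbers: for N = 1000 and THREADS_PER_BLOCK = 256,
//num_BLOCKS = 1 + (1000 - 1)/256 = 4 and num_THREADS = 256, i.e. 1024 threads in total,
//enough for one thread per bacterium.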
// declare GPU memory pointers
double4 *d_x;
double4 *d_n;
double3 *d_dUbdy_dxa;
double3 *d_dUbdy_dna;
double *d_t_run;
double *d_t_tumble;
int *d_tumble_flag;
float4 *d_random_numbers_noise;
// allocate GPU memory
cudaMalloc((void**) &d_x, N * sizeof(double4));
cudaMalloc((void**) &d_n, N * sizeof(double4));
cudaMalloc((void**) &d_dUbdy_dxa, N * sizeof(double3));
cudaMalloc((void**) &d_dUbdy_dna, N * sizeof(double3));
cudaMalloc((void**) &d_t_run, N * sizeof(double));
cudaMalloc((void**) &d_t_tumble, N * sizeof(double));
cudaMalloc((void**) &d_tumble_flag, N * sizeof(int));
cudaMalloc((void **)&d_random_numbers_noise, N * sizeof(float4));
// transfer the array to the GPU
cudaMemcpy(d_x, x, N * sizeof(double4), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, n, N * sizeof(double4), cudaMemcpyHostToDevice);
cudaMemcpy(d_t_run, t_run, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_t_tumble, t_tumble, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_tumble_flag, tumble_flag, N * sizeof(int), cudaMemcpyHostToDevice);
//random number generators:
curandState *d_CurandStates;
curandStatePhilox4_32_10_t *d_PHILOXStates;
cudaMalloc((void **) &d_CurandStates, N * sizeof(curandState));
cudaMalloc((void **) &d_PHILOXStates, N * sizeof(curandStatePhilox4_32_10_t));
// setup seeds
init<<< num_BLOCKS, num_THREADS >>>(seed, d_CurandStates);
init<<< num_BLOCKS, num_THREADS >>>(seed, d_PHILOXStates);
//-----End: set up cuda variables-----
cout << "End: INITIALIZING" << endl;
//-----End: INITIALIZING-----
//-----Start: DYNAMICS LOOP-----
int itime = floor(start_time / dt) + 1;
cout << "itime: " << itime << endl;
cout << "time_steps: " << time_steps << endl;
while (itime <= time_steps)
{
//-----Start: random numbers -----
generate_random_numbers_noise<<< num_BLOCKS, num_THREADS >>>(d_PHILOXStates, d_random_numbers_noise);
//-----End: random numbers -----
//-----Start: boundary interactions-----
calculate_BodyWallInteraction<<< num_BLOCKS, num_THREADS >>>(d_dUbdy_dxa,
d_dUbdy_dna, d_x, d_n,
sigma_bdy, R, C, N);
//-----End: boundary interactions-----
//-----Start: time-marching-----
time_marching<<< num_BLOCKS, num_THREADS >>>
(d_x, d_n,
d_dUbdy_dxa, d_dUbdy_dna,
epsilon_r,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R,
dt, N, L,
d_t_run, d_t_tumble, d_tumble_flag,
delta_run, delta_tumble,
avg_n_tumble, std_n_tumble,
d_CurandStates, d_random_numbers_noise);
//-----End: time-marching-----
//-----Start: saving variables-----
if ( itime % timestep_save == 0)
{
// copy back the result array to the CPU
cudaMemcpy(x, d_x, N * sizeof(double4), cudaMemcpyDeviceToHost);
cudaMemcpy(n, d_n, N * sizeof(double4), cudaMemcpyDeviceToHost);
cudaMemcpy(t_run, d_t_run, N * sizeof(double), cudaMemcpyDeviceToHost);
print_to_file_output(sim_num, case_num, itime, N, x, n, t_run);
}
//-----End: saving variables-----
printf("\ntime step: %d", itime);
itime++;
}
cout << endl << endl;
//-----End: DYNAMICS LOOP-----
return 0;
}
|
0600ec73bc305db3bb4ec6c83f16bbce05ecc272.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
__global__ void mulKernel(int* c, const int* a, const int* b, const int WIDTH) {
int x = threadIdx.x;
int y = threadIdx.y;
int i = y * WIDTH + x;
int sum = 0;
for (int k = 0; k < WIDTH; k++) {
sum += a[y*WIDTH+k] * b[k*WIDTH+x];
}
c[i] = sum;
}
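//note: each thread computes one element c[y][x] = sum over k of a[y][k]*b[k][x] of the
//row-major WIDTH x WIDTH matrices; since the kernel is launched with a single block in
//main() and a block is typically limited to 1024 threads, this only works for
//WIDTH*WIDTH <= 1024 (WIDTH <= 32).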
int main() {
//host-side
const int WIDTH = 5;
int a[WIDTH][WIDTH];
int b[WIDTH][WIDTH];
int c[WIDTH][WIDTH] = { 0 };
//make a,b matrices
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
a[x][y] = x * 10 + y;
b[x][y] = (x * 10 + y) * 100;
}
}
//device-side
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
//allocate device memory
hipMalloc((void**)&dev_a, WIDTH * WIDTH * sizeof(int));
hipMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int));
hipMalloc((void**)&dev_c, WIDTH * WIDTH * sizeof(int));
// copy from host to device
hipMemcpy(dev_a, a, WIDTH * WIDTH * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, WIDTH * WIDTH * sizeof(int), hipMemcpyHostToDevice);
//launch the kernel
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(WIDTH, WIDTH, 1);
mulKernel << <dimGrid, dimBlock >> > (dev_c, dev_a, dev_b, WIDTH);
hipMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(int),hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
printf("%5d", c[x][y]);
}
printf("\n");
}
return 0;
} | 0600ec73bc305db3bb4ec6c83f16bbce05ecc272.cu | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
__global__ void mulKernel(int* c, const int* a, const int* b, const int WIDTH) {
int x = threadIdx.x;
int y = threadIdx.y;
int i = y * WIDTH + x;
int sum = 0;
for (int k = 0; k < WIDTH; k++) {
sum += a[y*WIDTH+k] * b[k*WIDTH+x];
}
c[i] = sum;
}
int main() {
//host-side
const int WIDTH = 5;
int a[WIDTH][WIDTH];
int b[WIDTH][WIDTH];
int c[WIDTH][WIDTH] = { 0 };
//make a,b matrices
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
a[x][y] = x * 10 + y;
b[x][y] = (x * 10 + y) * 100;
}
}
//device-side
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
//allocate device memory
cudaMalloc((void**)&dev_a, WIDTH * WIDTH * sizeof(int));
cudaMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int));
cudaMalloc((void**)&dev_c, WIDTH * WIDTH * sizeof(int));
// copy from host to device
cudaMemcpy(dev_a, a, WIDTH * WIDTH * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, WIDTH * WIDTH * sizeof(int), cudaMemcpyHostToDevice);
//launch the kernel
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(WIDTH, WIDTH, 1);
mulKernel << <dimGrid, dimBlock >> > (dev_c, dev_a, dev_b, WIDTH);
cudaMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
printf("%5d", c[x][y]);
}
printf("\n");
}
return 0;
} |
1d0164024a6dd38106de789fab531db2c2e7241d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -*- coding:utf-8 -*-
/*
* Copyright (C) 2016 Sony Corporation
* This is UNPUBLISHED PROPRIETARY SOURCE CODE of Sony Corporation;
* the contents of this file is not to be disclosed to third parties, copied
* or duplicated in any form, in whole or in part, without the prior written
* permission of Sony Corporation.
*/
#include <nbla/array.hpp>
#include <nbla/common.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/celu.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/singleton_manager.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_celu_forward(const int size10_, const int size0_,
const T alpha, const T *x, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, size10_) {
int i1 = idx / size0_;
int i0 = idx % size0_;
const int j0 = i1 * size0_ * 2 + i0;
const T &xk = x[i1 * size0_ + i0];
y[j0] = 0 <= xk ? xk : alpha * (::exp(xk) - 1);
y[j0 + size0_] = xk <= 0 ? -xk : alpha * (::exp(-xk) - 1);
}
}
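// In effect this computes the Concatenated ELU: for each outer slice the first size0_
// outputs hold ELU(x) and the next size0_ outputs hold ELU(-x), so the output is twice
// as large as the input along the concatenation axis.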
template <typename T, bool accum>
__global__ void kernel_celu_backward(const int size10_, const int size0_,
const T alpha, const T *x, const T *dy,
T *dx) {
NBLA_CUDA_KERNEL_LOOP(idx, size10_) {
int i1 = idx / size0_;
int i0 = idx % size0_;
const int j0 = i1 * size0_ * 2 + i0;
const int j1 = j0 + size0_;
const int k = i1 * size0_ + i0;
const T &dyj0 = dy[j0];
const T &dyj1 = dy[j1];
const T &xk = x[k];
dx[k] =
(accum ? dx[k] : 0) + (0 <= xk ? dyj0 : dyj0 * alpha * ::exp(xk));
dx[k] -= xk <= 0 ? dyj1 : dyj1 * alpha * ::exp(-xk);
}
}
template <typename T>
void CELUCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
CELU<T>::setup_impl(inputs, outputs);
}
template <typename T>
void CELUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_celu_forward,
this->size0_ * this->size1_, this->size0_,
(T) this->alpha_, x, y);
}
template <typename T>
void CELUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_celu_backward<T, true>),
this->size0_ * this->size1_, this->size0_,
(T) this->alpha_, x, dy, dx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_celu_backward<T, false>),
this->size0_ * this->size1_, this->size0_,
(T) this->alpha_, x, dy, dx);
}
}
// template instantiation
template class CELUCuda<float>;
}
| 1d0164024a6dd38106de789fab531db2c2e7241d.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -*- coding:utf-8 -*-
/*
* Copyright (C) 2016 Sony Corporation
* This is UNPUBLISHED PROPRIETARY SOURCE CODE of Sony Corporation;
* the contents of this file is not to be disclosed to third parties, copied
* or duplicated in any form, in whole or in part, without the prior written
* permission of Sony Corporation.
*/
#include <nbla/array.hpp>
#include <nbla/common.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/celu.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/singleton_manager.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_celu_forward(const int size10_, const int size0_,
const T alpha, const T *x, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, size10_) {
int i1 = idx / size0_;
int i0 = idx % size0_;
const int j0 = i1 * size0_ * 2 + i0;
const T &xk = x[i1 * size0_ + i0];
y[j0] = 0 <= xk ? xk : alpha * (std::exp(xk) - 1);
y[j0 + size0_] = xk <= 0 ? -xk : alpha * (std::exp(-xk) - 1);
}
}
template <typename T, bool accum>
__global__ void kernel_celu_backward(const int size10_, const int size0_,
const T alpha, const T *x, const T *dy,
T *dx) {
NBLA_CUDA_KERNEL_LOOP(idx, size10_) {
int i1 = idx / size0_;
int i0 = idx % size0_;
const int j0 = i1 * size0_ * 2 + i0;
const int j1 = j0 + size0_;
const int k = i1 * size0_ + i0;
const T &dyj0 = dy[j0];
const T &dyj1 = dy[j1];
const T &xk = x[k];
dx[k] =
(accum ? dx[k] : 0) + (0 <= xk ? dyj0 : dyj0 * alpha * std::exp(xk));
dx[k] -= xk <= 0 ? dyj1 : dyj1 * alpha * std::exp(-xk);
}
}
template <typename T>
void CELUCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
CELU<T>::setup_impl(inputs, outputs);
}
template <typename T>
void CELUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_celu_forward,
this->size0_ * this->size1_, this->size0_,
(T) this->alpha_, x, y);
}
template <typename T>
void CELUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_celu_backward<T, true>),
this->size0_ * this->size1_, this->size0_,
(T) this->alpha_, x, dy, dx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_celu_backward<T, false>),
this->size0_ * this->size1_, this->size0_,
(T) this->alpha_, x, dy, dx);
}
}
// template instantiation
template class CELUCuda<float>;
}
|
381d07a45f467f2e80479de15f4071be637b6199.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//Sriram Madhivanan
//GPU kernels
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
#ifndef huffman_parallel
#include "../../../library/huffman/parallel/huffman_parallel.h"
#endif
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_single_run_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int numInputDataBlocks){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
unsigned int inputFileLength = d_inputFileLength;
unsigned int pos = threadIdx.x;
for(unsigned int i = blockIdx.x; i < numInputDataBlocks; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : inputFileLength;
//copy the input char's encoded bytes into d_byteCompressedData
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_single_run_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int inputFileLength){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int upperLimit = d_compressedDataOffset[inputFileLength];
for(unsigned int i = pos * 8; i < upperLimit; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
}
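//note on bit packing: each thread packs 8 consecutive 0/1 flags from d_byteCompressedData
//into one output byte, most significant bit first; e.g. the flags {1,0,1,1,0,0,1,0}
//become the byte 0b10110010 = 0xB2.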
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_single_run_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int numInputDataBlocks, unsigned int overFlowBlock, unsigned char *d_byteCompressedData_overflow){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
unsigned int inputFileLength = d_inputFileLength;
unsigned int pos = threadIdx.x;
//till overflow
for(unsigned int i = blockIdx.x; i < overFlowBlock; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i * BLOCK_SIZE + BLOCK_SIZE;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
//beyond overflow
for(unsigned int i = blockIdx.x + overFlowBlock; i < numInputDataBlocks; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : inputFileLength;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
if(i == overFlowBlock && j == (i * BLOCK_SIZE)){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
continue;
}
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_single_run_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int overFlowBlock, unsigned char *d_byteCompressedData_overflow){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int upperLimit_1 = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE];
for(unsigned int i = pos * 8; i < upperLimit_1; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
unsigned int offset_overflow = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE] / 8;
unsigned int upperLimit_2 = d_compressedDataOffset[d_inputFileLength];
for(unsigned int i = pos * 8; i < upperLimit_2; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData_overflow[i + j] == 0){
d_inputFileData[(i / 8) + offset_overflow] = d_inputFileData[(i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[(i / 8) + offset_overflow] = (d_inputFileData[(i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
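//The overflow variants are needed, presumably, because d_compressedDataOffset stores bit
//offsets as 32-bit unsigned ints and the running offset can wrap for large inputs: the
//encode step restarts its offsets at overFlowBlock, and the bytes packed from the second
//half are appended at offset_overflow, the byte length of the first half.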
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_multiple_runs_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputBlockLength, unsigned int d_numInputDataBlocks, unsigned int d_lowerBlock, unsigned int d_upperBlock, unsigned int d_inputFileLength){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
//unsigned int inputBlockLength = d_inputBlockLength;
unsigned int pos = threadIdx.x;
for(unsigned int i = d_lowerBlock + blockIdx.x; i < d_upperBlock; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < d_numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : d_inputFileLength;
//copy the input char's encoded bytes into d_byteCompressedData
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_multiple_runs_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int d_upperBlock, unsigned int d_writePosition){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int writePosition = d_writePosition;
unsigned int upperLimit = d_compressedDataOffset[d_upperBlock];
for(unsigned int i = pos * 8; i < upperLimit; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[writePosition + (i / 8)] = d_inputFileData[writePosition + (i / 8)] << 1;
}
else{
d_inputFileData[writePosition + (i / 8)] = (d_inputFileData[writePosition + (i / 8)] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_multiple_runs_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputBlockLength, unsigned int numInputDataBlocks, unsigned int lowerBlock, unsigned int upperBlock, unsigned int overFlowBlock, unsigned char *d_byteCompressedData_overflow, unsigned int d_inputFileLength){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
unsigned int inputFileLength = d_inputFileLength;
unsigned int pos = threadIdx.x;
//till overflow
for(unsigned int i = blockIdx.x; i < overFlowBlock; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i * BLOCK_SIZE + BLOCK_SIZE;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
//beyond overflow
for(unsigned int i = blockIdx.x + overFlowBlock; i < numInputDataBlocks; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : inputFileLength;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
if(i == overFlowBlock && j == (i * BLOCK_SIZE)){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
continue;
}
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_multiple_runs_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int d_inputBlockLength, unsigned int d_writePosition, unsigned int overFlowBlock, unsigned int upperBlock, unsigned char *d_byteCompressedData_overflow){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int upperLimit_1 = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE];
unsigned int writePosition = d_writePosition;
for(unsigned int i = pos * 8; i < upperLimit_1; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[writePosition + (i / 8)] = d_inputFileData[writePosition + (i / 8)] << 1;
}
else{
d_inputFileData[writePosition + (i / 8)] = (d_inputFileData[writePosition + (i / 8)] << 1) | 1;
}
}
}
unsigned int offset_overflow = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE] / 8;
unsigned int upperLimit_2 = d_compressedDataOffset[upperBlock * BLOCK_SIZE];
for(unsigned int i = pos * 8; i < upperLimit_2; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData_overflow[i + j] == 0){
d_inputFileData[writePosition + (i / 8) + offset_overflow] = d_inputFileData[writePosition + (i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[writePosition + (i / 8) + offset_overflow] = (d_inputFileData[writePosition + (i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/ | 381d07a45f467f2e80479de15f4071be637b6199.cu | /*---------------------------------------------------------------------------------------------------------------------------------------------*/
//Sriram Madhivanan
//GPU kernels
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
#ifndef huffman_parallel
#include "../../../library/huffman/parallel/huffman_parallel.h"
#endif
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_single_run_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int numInputDataBlocks){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
unsigned int inputFileLength = d_inputFileLength;
unsigned int pos = threadIdx.x;
for(unsigned int i = blockIdx.x; i < numInputDataBlocks; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : inputFileLength;
//copy the input char's encoded bytes into d_byteCompressedData
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_single_run_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int inputFileLength){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int upperLimit = d_compressedDataOffset[inputFileLength];
for(unsigned int i = pos * 8; i < upperLimit; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_single_run_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int numInputDataBlocks, unsigned int overFlowBlock, unsigned char *d_byteCompressedData_overflow){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
unsigned int inputFileLength = d_inputFileLength;
unsigned int pos = threadIdx.x;
//till overflow
for(unsigned int i = blockIdx.x; i < overFlowBlock; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i * BLOCK_SIZE + BLOCK_SIZE;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
//beyond overflow
for(unsigned int i = blockIdx.x + overFlowBlock; i < numInputDataBlocks; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : inputFileLength;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
if(i == overFlowBlock && j == (i * BLOCK_SIZE)){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
continue;
}
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_single_run_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int overFlowBlock, unsigned char *d_byteCompressedData_overflow){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int upperLimit_1 = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE];
for(unsigned int i = pos * 8; i < upperLimit_1; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
unsigned int offset_overflow = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE] / 8;
unsigned int upperLimit_2 = d_compressedDataOffset[d_inputFileLength];
for(unsigned int i = pos * 8; i < upperLimit_2; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData_overflow[i + j] == 0){
d_inputFileData[(i / 8) + offset_overflow] = d_inputFileData[(i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[(i / 8) + offset_overflow] = (d_inputFileData[(i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_multiple_runs_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputBlockLength, unsigned int d_numInputDataBlocks, unsigned int d_lowerBlock, unsigned int d_upperBlock, unsigned int d_inputFileLength){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
//unsigned int inputBlockLength = d_inputBlockLength;
unsigned int pos = threadIdx.x;
for(unsigned int i = d_lowerBlock + blockIdx.x; i < d_upperBlock; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < d_numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : d_inputFileLength;
//copy the input char's encoded bytes into d_byteCompressedData
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_multiple_runs_no_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int d_upperBlock, unsigned int d_writePosition){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int writePosition = d_writePosition;
unsigned int upperLimit = d_compressedDataOffset[d_upperBlock];
for(unsigned int i = pos * 8; i < upperLimit; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[writePosition + (i / 8)] = d_inputFileData[writePosition + (i / 8)] << 1;
}
else{
d_inputFileData[writePosition + (i / 8)] = (d_inputFileData[writePosition + (i / 8)] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//encode
__global__ void encode_multiple_runs_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, huffmanDictionary_t *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputBlockLength, unsigned int numInputDataBlocks, unsigned int lowerBlock, unsigned int upperBlock, unsigned int overFlowBlock, unsigned char *d_byteCompressedData_overflow, unsigned int d_inputFileLength){
__shared__ huffmanDictionary_t d_huffmanDictionary_shared;
unsigned int inputFileLength = d_inputFileLength;
unsigned int pos = threadIdx.x;
//till overflow
for(unsigned int i = blockIdx.x; i < overFlowBlock; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i * BLOCK_SIZE + BLOCK_SIZE;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
//beyond overflow
for(unsigned int i = blockIdx.x + overFlowBlock; i < numInputDataBlocks; i += gridDim.x){
//copy the specific dictionary to the shared memory
if(threadIdx.x == 0){
memcpy(&d_huffmanDictionary_shared, &d_huffmanDictionary[i], sizeof(huffmanDictionary_t));
}
__syncthreads();
unsigned int upperLimit = i < numInputDataBlocks - 1 ? i * BLOCK_SIZE + BLOCK_SIZE : inputFileLength;
for(unsigned int j = (i * BLOCK_SIZE) + pos; j < upperLimit; j += blockDim.x){
if(i == overFlowBlock && j == (i * BLOCK_SIZE)){
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
continue;
}
for(unsigned int k = 0; k < d_huffmanDictionary_shared.bitSequenceLength[d_inputFileData[j]]; k++){
d_byteCompressedData_overflow[d_compressedDataOffset[j] + k] = d_huffmanDictionary_shared.bitSequence[d_inputFileData[j]][k];
}
}
__syncthreads();
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//compress
__global__ void compress_multiple_runs_with_overflow(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, unsigned char *d_byteCompressedData, unsigned int d_inputBlockLength, unsigned int d_writePosition, unsigned int overFlowBlock, unsigned int upperBlock, unsigned char *d_byteCompressedData_overflow){
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int upperLimit_1 = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE];
unsigned int writePosition = d_writePosition;
for(unsigned int i = pos * 8; i < upperLimit_1; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[writePosition + (i / 8)] = d_inputFileData[writePosition + (i / 8)] << 1;
}
else{
d_inputFileData[writePosition + (i / 8)] = (d_inputFileData[writePosition + (i / 8)] << 1) | 1;
}
}
}
unsigned int offset_overflow = d_compressedDataOffset[overFlowBlock * BLOCK_SIZE] / 8;
unsigned int upperLimit_2 = d_compressedDataOffset[upperBlock * BLOCK_SIZE];
for(unsigned int i = pos * 8; i < upperLimit_2; i += (blockDim.x * gridDim.x) * 8){
for(unsigned int j = 0; j < 8; j++){
if(d_byteCompressedData_overflow[i + j] == 0){
d_inputFileData[writePosition + (i / 8) + offset_overflow] = d_inputFileData[writePosition + (i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[writePosition + (i / 8) + offset_overflow] = (d_inputFileData[writePosition + (i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/ |
299a17ce3400c19df7a199a151dc0d9a0071c2ab.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2021-2023 by XGBoost Contributors
*/
#include <thrust/scan.h>
#include <algorithm>
#include <cassert>
#include <hipcub/hipcub.hpp>
#include <limits>
#include <memory>
#include <tuple>
#include <utility>
#include "../collective/device_communicator.cuh"
#include "../common/optional_weight.h" // OptionalWeights
#include "../common/ranking_utils.cuh"
#include "auc.h"
#include "xgboost/data.h"
#include "xgboost/span.h"
namespace xgboost {
namespace metric {
namespace {
// Pair of FP/TP
using Pair = thrust::pair<double, double>;
template <typename T, typename U, typename P = thrust::pair<T, U>>
struct PairPlus : public thrust::binary_function<P, P, P> {
XGBOOST_DEVICE P operator()(P const& l, P const& r) const {
return thrust::make_pair(l.first + r.first, l.second + r.second);
}
};
} // namespace
/**
* A cache to GPU data to avoid reallocating memory.
*/
struct DeviceAUCCache {
// index sorted by prediction value
dh::device_vector<size_t> sorted_idx;
// track FP/TP for computation on trapezoid area
dh::device_vector<Pair> fptp;
// track FP_PREV/TP_PREV for computation on trapezoid area
dh::device_vector<Pair> neg_pos;
// index of unique prediction values.
dh::device_vector<size_t> unique_idx;
// p^T: transposed prediction matrix, used by MultiClassAUC
dh::device_vector<float> predts_t;
void Init(common::Span<float const> predts, bool is_multi) {
if (sorted_idx.size() != predts.size()) {
sorted_idx.resize(predts.size());
fptp.resize(sorted_idx.size());
unique_idx.resize(sorted_idx.size());
neg_pos.resize(sorted_idx.size());
if (is_multi) {
predts_t.resize(sorted_idx.size());
}
}
}
};
template <bool is_multi>
void InitCacheOnce(common::Span<float const> predts, std::shared_ptr<DeviceAUCCache> *p_cache) {
auto& cache = *p_cache;
if (!cache) {
cache.reset(new DeviceAUCCache);
}
cache->Init(predts, is_multi);
}
/**
* The GPU implementation uses same calculation as CPU with a few more steps to distribute
* work across threads:
*
* - Run scan to obtain TP/FP values, which are right coordinates of trapezoid.
* - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value,
* which are left coordinates of trapezoids.
* - Reduce the scan array into 1 AUC value.
*/
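// A small worked illustration of the steps above (unit weights, predictions taken in
// descending order): predt = {0.9, 0.8, 0.8, 0.3}, label = {1, 0, 1, 0}. The per-element
// (fp, tp) pairs are {(0,1), (1,0), (0,1), (1,0)}; after the inclusive scan the running
// totals are {(0,1), (1,1), (1,2), (2,2)}. The distinct prediction values start at
// indices {0, 1, 3}, so the trapezoids run (0,0)->(0,1), (0,1)->(1,2) and (1,2)->(2,2)
// with areas 0, 1.5 and 2 by the trapezoid rule. The reduced sum is 3.5, which the
// caller is expected to normalize by FP * TP = 4, giving AUC = 0.875.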
template <typename Fn>
std::tuple<double, double, double>
GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info,
int32_t device, common::Span<size_t const> d_sorted_idx,
Fn area_fn, std::shared_ptr<DeviceAUCCache> cache) {
auto labels = info.labels.View(device);
auto weights = info.weights_.ConstDeviceSpan();
dh::safe_cuda(hipSetDevice(device));
CHECK_NE(labels.Size(), 0);
CHECK_EQ(labels.Size(), predts.size());
/**
* Linear scan
*/
auto get_weight = common::OptionalWeights{weights};
auto get_fp_tp = [=]XGBOOST_DEVICE(size_t i) {
size_t idx = d_sorted_idx[i];
float label = labels(idx);
float w = get_weight[d_sorted_idx[i]];
float fp = (1.0 - label) * w;
float tp = label * w;
return thrust::make_pair(fp, tp);
}; // NOLINT
auto d_fptp = dh::ToSpan(cache->fptp);
dh::LaunchN(d_sorted_idx.size(),
[=] XGBOOST_DEVICE(size_t i) { d_fptp[i] = get_fp_tp(i); });
dh::XGBDeviceAllocator<char> alloc;
auto d_unique_idx = dh::ToSpan(cache->unique_idx);
dh::Iota(d_unique_idx);
auto uni_key = dh::MakeTransformIterator<float>(
thrust::make_counting_iterator(0),
[=] XGBOOST_DEVICE(size_t i) { return predts[d_sorted_idx[i]]; });
auto end_unique = thrust::unique_by_key_copy(
thrust::hip::par(alloc), uni_key, uni_key + d_sorted_idx.size(),
dh::tbegin(d_unique_idx), thrust::make_discard_iterator(),
dh::tbegin(d_unique_idx));
d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx));
dh::InclusiveScan(dh::tbegin(d_fptp), dh::tbegin(d_fptp),
PairPlus<double, double>{}, d_fptp.size());
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
// scatter unique negative/positive values
// shift to right by 1 with initial value being 0
dh::LaunchN(d_unique_idx.size(), [=] XGBOOST_DEVICE(size_t i) {
if (d_unique_idx[i] == 0) { // first unique index is 0
assert(i == 0);
d_neg_pos[0] = {0, 0};
return;
}
d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1];
if (i == d_unique_idx.size() - 1) {
// last one needs to be included, may override above assignment if the last
// prediction value is distinct from previous one.
d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1];
return;
}
});
auto in = dh::MakeTransformIterator<double>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
double fp, tp;
double fp_prev, tp_prev;
if (i == 0) {
// handle the last element
thrust::tie(fp, tp) = d_fptp.back();
thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()];
} else {
thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1];
thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]];
}
return area_fn(fp_prev, fp, tp_prev, tp);
});
Pair last = cache->fptp.back();
double auc = thrust::reduce(thrust::hip::par(alloc), in, in + d_unique_idx.size());
return std::make_tuple(last.first, last.second, auc);
}
std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
auto &cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
/**
* Create sorted index for each class
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::ArgSort<false>(predts, d_sorted_idx);
// Create lambda to avoid pass function pointer.
return GPUBinaryAUC(
predts, info, device, d_sorted_idx,
[] XGBOOST_DEVICE(double x0, double x1, double y0, double y1) -> double {
return TrapezoidArea(x0, x1, y0, y1);
},
cache);
}
void Transpose(common::Span<float const> in, common::Span<float> out, size_t m,
size_t n) {
CHECK_EQ(in.size(), out.size());
CHECK_EQ(in.size(), m * n);
dh::LaunchN(in.size(), [=] XGBOOST_DEVICE(size_t i) {
size_t col = i / m;
size_t row = i % m;
size_t idx = row * n + col;
out[i] = in[idx];
});
}
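// For example, with m = 2 rows and n = 3 columns the row-major input {a, b, c, d, e, f}
// (i.e. [a b c; d e f]) is written out as {a, d, b, e, c, f}, i.e. the n x m transpose
// laid out row by row.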
double ScaleClasses(common::Span<double> results, common::Span<double> local_area,
common::Span<double> tp, common::Span<double> auc, size_t n_classes) {
dh::XGBDeviceAllocator<char> alloc;
if (collective::IsDistributed()) {
int32_t device = dh::CurrentDevice();
CHECK_EQ(dh::CudaGetPointerDevice(results.data()), device);
auto* communicator = collective::Communicator::GetDevice(device);
communicator->AllReduceSum(results.data(), results.size());
}
auto reduce_in = dh::MakeTransformIterator<Pair>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
if (local_area[i] > 0) {
return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]);
}
return thrust::make_pair(std::numeric_limits<double>::quiet_NaN(), 0.0);
});
double tp_sum;
double auc_sum;
thrust::tie(auc_sum, tp_sum) =
thrust::reduce(thrust::hip::par(alloc), reduce_in, reduce_in + n_classes,
Pair{0.0, 0.0}, PairPlus<double, double>{});
if (tp_sum != 0 && !std::isnan(auc_sum)) {
auc_sum /= tp_sum;
} else {
return std::numeric_limits<double>::quiet_NaN();
}
return auc_sum;
}
/**
* Calculate FP/TP for multi-class and PR-AUC ranking. `segment_id` is a function for
* getting class id or group id given scan index.
*/
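// For instance, with two segments of three elements each, the running (fp, tp) sums
// restart at index 3: the scan operator below returns its right operand unchanged
// whenever the two operands belong to different segments.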
template <typename Fn>
void SegmentedFPTP(common::Span<Pair> d_fptp, Fn segment_id) {
using Triple = thrust::tuple<uint32_t, double, double>;
// expand to tuple to include idx
auto fptp_it_in = dh::MakeTransformIterator<Triple>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
return thrust::make_tuple(i, d_fptp[i].first, d_fptp[i].second);
});
// shrink down to pair
auto fptp_it_out = thrust::make_transform_output_iterator(
dh::TypedDiscard<Triple>{}, [d_fptp] XGBOOST_DEVICE(Triple const &t) {
d_fptp[thrust::get<0>(t)] =
thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t));
return t;
});
dh::InclusiveScan(
fptp_it_in, fptp_it_out,
[=] XGBOOST_DEVICE(Triple const &l, Triple const &r) {
uint32_t l_gid = segment_id(thrust::get<0>(l));
uint32_t r_gid = segment_id(thrust::get<0>(r));
if (l_gid != r_gid) {
return r;
}
return Triple(thrust::get<0>(r),
thrust::get<1>(l) + thrust::get<1>(r), // fp
thrust::get<2>(l) + thrust::get<2>(r)); // tp
},
d_fptp.size());
}
/**
* Reduce the values of AUC for each group/class.
*/
template <typename Area, typename Seg>
void SegmentedReduceAUC(common::Span<size_t const> d_unique_idx,
common::Span<uint32_t const> d_class_ptr,
common::Span<uint32_t const> d_unique_class_ptr,
std::shared_ptr<DeviceAUCCache> cache,
Area area_fn,
Seg segment_id,
common::Span<double> d_auc) {
auto d_fptp = dh::ToSpan(cache->fptp);
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
dh::XGBDeviceAllocator<char> alloc;
auto key_in = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
size_t class_id = segment_id(d_unique_idx[i]);
return class_id;
});
auto val_in = dh::MakeTransformIterator<double>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
size_t class_id = segment_id(d_unique_idx[i]);
double fp, tp, fp_prev, tp_prev;
if (i == d_unique_class_ptr[class_id]) {
// first item is ignored, we use this thread to calculate the last item
thrust::tie(fp, tp) = d_fptp[common::LastOf(class_id, d_class_ptr)];
thrust::tie(fp_prev, tp_prev) =
d_neg_pos[d_unique_idx[common::LastOf(class_id, d_unique_class_ptr)]];
} else {
thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1];
thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]];
}
double auc = area_fn(fp_prev, fp, tp_prev, tp, class_id);
return auc;
});
thrust::reduce_by_key(thrust::hip::par(alloc), key_in,
key_in + d_unique_idx.size(), val_in,
thrust::make_discard_iterator(), dh::tbegin(d_auc));
}
/**
* MultiClass implementation is similar to binary classification, except we need to split
* up each class in all kernels.
*/
template <bool scale, typename Fn>
double GPUMultiClassAUCOVR(MetaInfo const &info, int32_t device, common::Span<uint32_t> d_class_ptr,
size_t n_classes, std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
dh::safe_cuda(hipSetDevice(device));
/**
* Sorted idx
*/
auto d_predts_t = dh::ToSpan(cache->predts_t);
// Index is sorted within class.
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
auto labels = info.labels.View(device);
auto weights = info.weights_.ConstDeviceSpan();
size_t n_samples = labels.Shape(0);
if (n_samples == 0) {
dh::TemporaryArray<double> results(n_classes * 4, 0.0f);
auto d_results = dh::ToSpan(results);
dh::LaunchN(n_classes * 4,
[=] XGBOOST_DEVICE(size_t i) { d_results[i] = 0.0f; });
auto local_area = d_results.subspan(0, n_classes);
auto tp = d_results.subspan(2 * n_classes, n_classes);
auto auc = d_results.subspan(3 * n_classes, n_classes);
return ScaleClasses(d_results, local_area, tp, auc, n_classes);
}
/**
* Linear scan
*/
dh::caching_device_vector<double> d_auc(n_classes, 0);
auto get_weight = common::OptionalWeights{weights};
auto d_fptp = dh::ToSpan(cache->fptp);
auto get_fp_tp = [=]XGBOOST_DEVICE(size_t i) {
size_t idx = d_sorted_idx[i];
size_t class_id = i / n_samples;
// labels is a vector of size n_samples.
float label = labels(idx % n_samples) == class_id;
float w = get_weight[d_sorted_idx[i] % n_samples];
float fp = (1.0 - label) * w;
float tp = label * w;
return thrust::make_pair(fp, tp);
}; // NOLINT
dh::LaunchN(d_sorted_idx.size(),
[=] XGBOOST_DEVICE(size_t i) { d_fptp[i] = get_fp_tp(i); });
/**
* Handle duplicated predictions
*/
dh::XGBDeviceAllocator<char> alloc;
auto d_unique_idx = dh::ToSpan(cache->unique_idx);
dh::Iota(d_unique_idx);
auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
uint32_t class_id = i / n_samples;
float predt = d_predts_t[d_sorted_idx[i]];
return thrust::make_pair(class_id, predt);
});
// unique values are sparse, so we need a CSR style indptr
dh::TemporaryArray<uint32_t> unique_class_ptr(d_class_ptr.size());
auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr);
auto n_uniques = dh::SegmentedUniqueByKey(
thrust::hip::par(alloc),
dh::tbegin(d_class_ptr),
dh::tend(d_class_ptr),
uni_key,
uni_key + d_sorted_idx.size(),
dh::tbegin(d_unique_idx),
d_unique_class_ptr.data(),
dh::tbegin(d_unique_idx),
thrust::equal_to<thrust::pair<uint32_t, float>>{});
d_unique_idx = d_unique_idx.subspan(0, n_uniques);
auto get_class_id = [=] XGBOOST_DEVICE(size_t idx) { return idx / n_samples; };
SegmentedFPTP(d_fptp, get_class_id);
// scatter unique FP_PREV/TP_PREV values
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
// When dataset is not empty, each class must have at least 1 (unique) sample
// prediction, so no need to handle special case.
dh::LaunchN(d_unique_idx.size(), [=] XGBOOST_DEVICE(size_t i) {
if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0
assert(d_unique_idx[i] % n_samples == 0);
d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i
return;
}
uint32_t class_id = d_unique_idx[i] / n_samples;
d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1];
if (i == common::LastOf(class_id, d_unique_class_ptr)) {
// last one needs to be included.
size_t last = d_unique_idx[common::LastOf(class_id, d_unique_class_ptr)];
d_neg_pos[common::LastOf(class_id, d_class_ptr)] = d_fptp[last - 1];
return;
}
});
/**
* Reduce the result for each class
*/
auto s_d_auc = dh::ToSpan(d_auc);
SegmentedReduceAUC(d_unique_idx, d_class_ptr, d_unique_class_ptr, cache,
area_fn, get_class_id, s_d_auc);
/**
* Scale the classes with number of samples for each class.
*/
dh::TemporaryArray<double> results(n_classes * 4);
auto d_results = dh::ToSpan(results);
auto local_area = d_results.subspan(0, n_classes);
auto fp = d_results.subspan(n_classes, n_classes);
auto tp = d_results.subspan(2 * n_classes, n_classes);
auto auc = d_results.subspan(3 * n_classes, n_classes);
dh::LaunchN(n_classes, [=] XGBOOST_DEVICE(size_t c) {
auc[c] = s_d_auc[c];
auto last = d_fptp[n_samples * c + (n_samples - 1)];
fp[c] = last.first;
if (scale) {
local_area[c] = last.first * last.second;
tp[c] = last.second;
} else {
local_area[c] = 1.0f;
tp[c] = 1.0f;
}
});
return ScaleClasses(d_results, local_area, tp, auc, n_classes);
}
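// GPUMultiClassAUCOVR above treats the k-class problem as k independent one-vs-rest binary
// problems laid out contiguously in the transposed prediction matrix; ScaleClasses then folds
// the per-class AUCs into a single score (a tp-weighted average when `scale` is true).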
void MultiClassSortedIdx(common::Span<float const> predts,
common::Span<uint32_t> d_class_ptr,
std::shared_ptr<DeviceAUCCache> cache) {
size_t n_classes = d_class_ptr.size() - 1;
auto d_predts_t = dh::ToSpan(cache->predts_t);
auto n_samples = d_predts_t.size() / n_classes;
if (n_samples == 0) {
return;
}
Transpose(predts, d_predts_t, n_samples, n_classes);
dh::LaunchN(n_classes + 1,
[=] XGBOOST_DEVICE(size_t i) { d_class_ptr[i] = i * n_samples; });
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx);
}
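// MultiClassSortedIdx above lays the predictions out class-major, so class c owns the
// contiguous range [c * n_samples, (c + 1) * n_samples); d_class_ptr stores those CSR-style
// segment boundaries and the argsort runs independently inside each segment.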
double GPUMultiClassROCAUC(common::Span<float const> predts, MetaInfo const &info,
std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
std::size_t n_classes) {
auto& cache = *p_cache;
InitCacheOnce<true>(predts, p_cache);
/**
* Create sorted index for each class
*/
dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0);
MultiClassSortedIdx(predts, dh::ToSpan(class_ptr), cache);
auto fn = [] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp, size_t /*class_id*/) {
return TrapezoidArea(fp_prev, fp, tp_prev, tp);
};
return GPUMultiClassAUCOVR<true>(info, device, dh::ToSpan(class_ptr), n_classes, cache, fn);
}
namespace {
struct RankScanItem {
size_t idx;
double predt;
double w;
bst_group_t group_id;
};
} // anonymous namespace
std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
auto& cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_);
dh::XGBCachingDeviceAllocator<char> alloc;
auto d_group_ptr = dh::ToSpan(group_ptr);
/**
* Validate the dataset
*/
auto check_it = dh::MakeTransformIterator<size_t>(
thrust::make_counting_iterator(0),
[=] XGBOOST_DEVICE(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; });
size_t n_valid = thrust::count_if(
thrust::hip::par(alloc), check_it, check_it + group_ptr.size() - 1,
[=] XGBOOST_DEVICE(size_t len) { return len >= 3; });
if (n_valid < info.group_ptr_.size() - 1) {
InvalidGroupAUC();
}
if (n_valid == 0) {
return std::make_pair(0.0, 0);
}
/**
* Sort the labels
*/
auto d_labels = info.labels.View(device);
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::SegmentedArgSort<false>(d_labels.Values(), d_group_ptr, d_sorted_idx);
auto d_weights = info.weights_.ConstDeviceSpan();
dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0);
auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr);
// Use max to represent triangle
auto n_threads = common::SegmentedTrapezoidThreads(
d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max());
CHECK_LT(n_threads, std::numeric_limits<int32_t>::max());
// get the coordinate in nested summation
auto get_i_j = [=]XGBOOST_DEVICE(size_t idx, size_t query_group_idx) {
auto data_group_begin = d_group_ptr[query_group_idx];
size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin;
auto thread_group_begin = d_threads_group_ptr[query_group_idx];
auto idx_in_thread_group = idx - thread_group_begin;
size_t i, j;
common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j);
// we use global index among all groups for sorted idx, so i, j should also be global
// index.
i += data_group_begin;
j += data_group_begin;
return thrust::make_pair(i, j);
}; // NOLINT
auto in = dh::MakeTransformIterator<RankScanItem>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t idx) {
bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx);
auto data_group_begin = d_group_ptr[query_group_idx];
size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin;
if (n_samples < 3) {
// at least 3 documents are required.
return RankScanItem{idx, 0, 0, query_group_idx};
}
size_t i, j;
thrust::tie(i, j) = get_i_j(idx, query_group_idx);
float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]];
float w = common::Sqr(d_weights.empty() ? 1.0f : d_weights[query_group_idx]);
if (predt > 0) {
predt = 1.0;
} else if (predt == 0) {
predt = 0.5;
} else {
predt = 0;
}
predt *= w;
return RankScanItem{idx, predt, w, query_group_idx};
});
dh::TemporaryArray<double> d_auc(group_ptr.size() - 1);
auto s_d_auc = dh::ToSpan(d_auc);
auto out = thrust::make_transform_output_iterator(
dh::TypedDiscard<RankScanItem>{},
[=] XGBOOST_DEVICE(RankScanItem const &item) -> RankScanItem {
auto group_id = item.group_id;
assert(group_id < d_group_ptr.size());
auto data_group_begin = d_group_ptr[group_id];
size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin;
// last item of current group
if (item.idx == common::LastOf(group_id, d_threads_group_ptr)) {
if (item.w > 0) {
s_d_auc[group_id] = item.predt / item.w;
} else {
s_d_auc[group_id] = 0;
}
}
return {}; // discard
});
dh::InclusiveScan(
in, out,
[] XGBOOST_DEVICE(RankScanItem const &l, RankScanItem const &r) {
if (l.group_id != r.group_id) {
return r;
}
return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id};
},
n_threads);
/**
* Scale the AUC with number of items in each group.
*/
double auc = thrust::reduce(thrust::hip::par(alloc), dh::tbegin(s_d_auc),
dh::tend(s_d_auc), 0.0);
return std::make_pair(auc, n_valid);
}
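// GPURankingAUC above scores one document pair per thread inside each query group: a pair
// whose predictions agree with the label-sorted order contributes the squared group weight,
// a prediction tie contributes half of it, and the keyed inclusive scan turns the running
// sums into per-group AUC = accumulated score / accumulated weight.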
std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
auto& cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
/**
* Create sorted index for each class
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::ArgSort<false>(predts, d_sorted_idx);
auto labels = info.labels.View(device);
auto d_weights = info.weights_.ConstDeviceSpan();
auto get_weight = common::OptionalWeights{d_weights};
auto it = dh::MakeTransformIterator<Pair>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
auto w = get_weight[d_sorted_idx[i]];
return thrust::make_pair(labels(d_sorted_idx[i]) * w,
(1.0f - labels(d_sorted_idx[i])) * w);
});
dh::XGBCachingDeviceAllocator<char> alloc;
double total_pos, total_neg;
thrust::tie(total_pos, total_neg) =
thrust::reduce(thrust::hip::par(alloc), it, it + labels.Size(),
Pair{0.0, 0.0}, PairPlus<double, double>{});
if (total_pos <= 0.0 || total_neg <= 0.0) {
return {0.0f, 0.0f, 0.0f};
}
auto fn = [total_pos] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp) {
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp, total_pos);
};
double fp, tp, auc;
std::tie(fp, tp, auc) = GPUBinaryAUC(predts, info, device, d_sorted_idx, fn, cache);
return std::make_tuple(1.0, 1.0, auc);
}
double GPUMultiClassPRAUC(common::Span<float const> predts, MetaInfo const &info,
std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
std::size_t n_classes) {
auto& cache = *p_cache;
InitCacheOnce<true>(predts, p_cache);
/**
* Create sorted index for each class
*/
dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0);
auto d_class_ptr = dh::ToSpan(class_ptr);
MultiClassSortedIdx(predts, d_class_ptr, cache);
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
auto d_weights = info.weights_.ConstDeviceSpan();
/**
* Get total positive/negative
*/
auto labels = info.labels.View(device);
auto n_samples = info.num_row_;
dh::caching_device_vector<Pair> totals(n_classes);
auto key_it =
dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0ul),
[n_samples] XGBOOST_DEVICE(size_t i) {
return i / n_samples; // class id
});
auto get_weight = common::OptionalWeights{d_weights};
auto val_it = dh::MakeTransformIterator<thrust::pair<double, double>>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
auto idx = d_sorted_idx[i] % n_samples;
auto w = get_weight[idx];
auto class_id = i / n_samples;
auto y = labels(idx) == class_id;
return thrust::make_pair(y * w, (1.0f - y) * w);
});
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::reduce_by_key(thrust::hip::par(alloc), key_it,
key_it + predts.size(), val_it,
thrust::make_discard_iterator(), totals.begin(),
thrust::equal_to<size_t>{}, PairPlus<double, double>{});
/**
* Calculate AUC
*/
auto d_totals = dh::ToSpan(totals);
auto fn = [d_totals] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp, size_t class_id) {
auto total_pos = d_totals[class_id].first;
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
d_totals[class_id].first);
};
return GPUMultiClassAUCOVR<false>(info, device, d_class_ptr, n_classes, cache, fn);
}
template <typename Fn>
std::pair<double, uint32_t>
GPURankingPRAUCImpl(common::Span<float const> predts, MetaInfo const &info,
common::Span<uint32_t> d_group_ptr, int32_t device,
std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
/**
* Sorted idx
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
auto labels = info.labels.View(device);
auto weights = info.weights_.ConstDeviceSpan();
uint32_t n_groups = static_cast<uint32_t>(info.group_ptr_.size() - 1);
/**
* Linear scan
*/
size_t n_samples = labels.Shape(0);
dh::caching_device_vector<double> d_auc(n_groups, 0);
auto get_weight = common::OptionalWeights{weights};
auto d_fptp = dh::ToSpan(cache->fptp);
auto get_fp_tp = [=] XGBOOST_DEVICE(size_t i) {
size_t idx = d_sorted_idx[i];
size_t group_id = dh::SegmentId(d_group_ptr, idx);
float label = labels(idx);
float w = get_weight[group_id];
float fp = (1.0 - label) * w;
float tp = label * w;
return thrust::make_pair(fp, tp);
}; // NOLINT
dh::LaunchN(d_sorted_idx.size(),
[=] XGBOOST_DEVICE(size_t i) { d_fptp[i] = get_fp_tp(i); });
/**
* Handle duplicated predictions
*/
dh::XGBDeviceAllocator<char> alloc;
auto d_unique_idx = dh::ToSpan(cache->unique_idx);
dh::Iota(d_unique_idx);
auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
auto idx = d_sorted_idx[i];
bst_group_t group_id = dh::SegmentId(d_group_ptr, idx);
float predt = predts[idx];
return thrust::make_pair(group_id, predt);
});
// unique values are sparse, so we need a CSR style indptr
dh::TemporaryArray<uint32_t> unique_class_ptr(d_group_ptr.size());
auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr);
auto n_uniques = dh::SegmentedUniqueByKey(
thrust::hip::par(alloc),
dh::tbegin(d_group_ptr),
dh::tend(d_group_ptr),
uni_key,
uni_key + d_sorted_idx.size(),
dh::tbegin(d_unique_idx),
d_unique_class_ptr.data(),
dh::tbegin(d_unique_idx),
thrust::equal_to<thrust::pair<uint32_t, float>>{});
d_unique_idx = d_unique_idx.subspan(0, n_uniques);
auto get_group_id = [=] XGBOOST_DEVICE(size_t idx) {
return dh::SegmentId(d_group_ptr, idx);
};
SegmentedFPTP(d_fptp, get_group_id);
// scatter unique FP_PREV/TP_PREV values
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
dh::LaunchN(d_unique_idx.size(), [=] XGBOOST_DEVICE(size_t i) {
if (thrust::binary_search(thrust::seq, d_unique_class_ptr.cbegin(),
d_unique_class_ptr.cend(),
i)) { // first unique index is 0
d_neg_pos[d_unique_idx[i]] = {0, 0};
return;
}
auto group_idx = dh::SegmentId(d_group_ptr, d_unique_idx[i]);
d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1];
if (i == common::LastOf(group_idx, d_unique_class_ptr)) {
// last one needs to be included.
size_t last = d_unique_idx[common::LastOf(group_idx, d_unique_class_ptr)];
d_neg_pos[common::LastOf(group_idx, d_group_ptr)] = d_fptp[last - 1];
return;
}
});
/**
* Reduce the result for each group
*/
auto s_d_auc = dh::ToSpan(d_auc);
SegmentedReduceAUC(d_unique_idx, d_group_ptr, d_unique_class_ptr, cache,
area_fn, get_group_id, s_d_auc);
/**
* Scale the groups with number of samples for each group.
*/
double auc;
uint32_t invalid_groups;
{
auto it = dh::MakeTransformIterator<thrust::pair<double, uint32_t>>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t g) {
double fp, tp;
thrust::tie(fp, tp) = d_fptp[common::LastOf(g, d_group_ptr)];
double area = fp * tp;
auto n_documents = d_group_ptr[g + 1] - d_group_ptr[g];
if (area > 0 && n_documents >= 2) {
return thrust::make_pair(s_d_auc[g], static_cast<uint32_t>(0));
}
return thrust::make_pair(0.0, static_cast<uint32_t>(1));
});
thrust::tie(auc, invalid_groups) = thrust::reduce(
thrust::hip::par(alloc), it, it + n_groups,
thrust::pair<double, uint32_t>(0.0, 0), PairPlus<double, uint32_t>{});
}
return std::make_pair(auc, n_groups - invalid_groups);
}
std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
dh::safe_cuda(hipSetDevice(device));
if (predts.empty()) {
return std::make_pair(0.0, static_cast<uint32_t>(0));
}
auto &cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
dh::device_vector<bst_group_t> group_ptr(info.group_ptr_.size());
thrust::copy(info.group_ptr_.begin(), info.group_ptr_.end(), group_ptr.begin());
auto d_group_ptr = dh::ToSpan(group_ptr);
CHECK_GE(info.group_ptr_.size(), 1) << "Must have at least 1 query group for LTR.";
size_t n_groups = info.group_ptr_.size() - 1;
/**
* Create sorted index for each group
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::SegmentedArgSort<false>(predts, d_group_ptr, d_sorted_idx);
dh::XGBDeviceAllocator<char> alloc;
auto labels = info.labels.View(device);
if (thrust::any_of(thrust::hip::par(alloc), dh::tbegin(labels.Values()),
dh::tend(labels.Values()), PRAUCLabelInvalid{})) {
InvalidLabels();
}
/**
* Get total positive/negative for each group.
*/
auto d_weights = info.weights_.ConstDeviceSpan();
dh::caching_device_vector<thrust::pair<double, double>> totals(n_groups);
auto key_it = dh::MakeTransformIterator<size_t>(
thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(size_t i) { return dh::SegmentId(d_group_ptr, i); });
auto val_it = dh::MakeTransformIterator<Pair>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
float w = 1.0f;
if (!d_weights.empty()) {
// Avoid a binary search if the groups are not weighted.
auto g = dh::SegmentId(d_group_ptr, i);
w = d_weights[g];
}
auto y = labels(i);
return thrust::make_pair(y * w, (1.0 - y) * w);
});
thrust::reduce_by_key(thrust::hip::par(alloc), key_it,
key_it + predts.size(), val_it,
thrust::make_discard_iterator(), totals.begin(),
thrust::equal_to<size_t>{}, PairPlus<double, double>{});
/**
* Calculate AUC
*/
auto d_totals = dh::ToSpan(totals);
auto fn = [d_totals] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp, size_t group_id) {
auto total_pos = d_totals[group_id].first;
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
d_totals[group_id].first);
};
return GPURankingPRAUCImpl(predts, info, d_group_ptr, device, cache, fn);
}
} // namespace metric
} // namespace xgboost
| 299a17ce3400c19df7a199a151dc0d9a0071c2ab.cu | /**
* Copyright 2021-2023 by XGBoost Contributors
*/
#include <thrust/scan.h>
#include <algorithm>
#include <cassert>
#include <cub/cub.cuh>
#include <limits>
#include <memory>
#include <tuple>
#include <utility>
#include "../collective/device_communicator.cuh"
#include "../common/optional_weight.h" // OptionalWeights
#include "../common/ranking_utils.cuh"
#include "auc.h"
#include "xgboost/data.h"
#include "xgboost/span.h"
namespace xgboost {
namespace metric {
namespace {
// Pair of FP/TP
using Pair = thrust::pair<double, double>;
template <typename T, typename U, typename P = thrust::pair<T, U>>
struct PairPlus : public thrust::binary_function<P, P, P> {
XGBOOST_DEVICE P operator()(P const& l, P const& r) const {
return thrust::make_pair(l.first + r.first, l.second + r.second);
}
};
} // namespace
/**
* A cache of GPU data to avoid reallocating memory.
*/
struct DeviceAUCCache {
// index sorted by prediction value
dh::device_vector<size_t> sorted_idx;
// track FP/TP for computation on trapezoid area
dh::device_vector<Pair> fptp;
// track FP_PREV/TP_PREV for computation on trapezoid area
dh::device_vector<Pair> neg_pos;
// index of unique prediction values.
dh::device_vector<size_t> unique_idx;
// p^T: transposed prediction matrix, used by MultiClassAUC
dh::device_vector<float> predts_t;
void Init(common::Span<float const> predts, bool is_multi) {
if (sorted_idx.size() != predts.size()) {
sorted_idx.resize(predts.size());
fptp.resize(sorted_idx.size());
unique_idx.resize(sorted_idx.size());
neg_pos.resize(sorted_idx.size());
if (is_multi) {
predts_t.resize(sorted_idx.size());
}
}
}
};
template <bool is_multi>
void InitCacheOnce(common::Span<float const> predts, std::shared_ptr<DeviceAUCCache> *p_cache) {
auto& cache = *p_cache;
if (!cache) {
cache.reset(new DeviceAUCCache);
}
cache->Init(predts, is_multi);
}
/**
* The GPU implementation uses the same calculation as the CPU one, with a few extra steps to
* distribute the work across threads:
*
* - Run a scan to obtain the TP/FP values, which are the right coordinates of the trapezoids.
* - Find the distinct prediction values and get the corresponding FP_PREV/TP_PREV values,
* which are the left coordinates of the trapezoids.
* - Reduce the scan array into a single AUC value.
*/
template <typename Fn>
std::tuple<double, double, double>
GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info,
int32_t device, common::Span<size_t const> d_sorted_idx,
Fn area_fn, std::shared_ptr<DeviceAUCCache> cache) {
auto labels = info.labels.View(device);
auto weights = info.weights_.ConstDeviceSpan();
dh::safe_cuda(cudaSetDevice(device));
CHECK_NE(labels.Size(), 0);
CHECK_EQ(labels.Size(), predts.size());
/**
* Linear scan
*/
auto get_weight = common::OptionalWeights{weights};
auto get_fp_tp = [=]XGBOOST_DEVICE(size_t i) {
size_t idx = d_sorted_idx[i];
float label = labels(idx);
float w = get_weight[d_sorted_idx[i]];
float fp = (1.0 - label) * w;
float tp = label * w;
return thrust::make_pair(fp, tp);
}; // NOLINT
auto d_fptp = dh::ToSpan(cache->fptp);
dh::LaunchN(d_sorted_idx.size(),
[=] XGBOOST_DEVICE(size_t i) { d_fptp[i] = get_fp_tp(i); });
dh::XGBDeviceAllocator<char> alloc;
auto d_unique_idx = dh::ToSpan(cache->unique_idx);
dh::Iota(d_unique_idx);
auto uni_key = dh::MakeTransformIterator<float>(
thrust::make_counting_iterator(0),
[=] XGBOOST_DEVICE(size_t i) { return predts[d_sorted_idx[i]]; });
auto end_unique = thrust::unique_by_key_copy(
thrust::cuda::par(alloc), uni_key, uni_key + d_sorted_idx.size(),
dh::tbegin(d_unique_idx), thrust::make_discard_iterator(),
dh::tbegin(d_unique_idx));
d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx));
dh::InclusiveScan(dh::tbegin(d_fptp), dh::tbegin(d_fptp),
PairPlus<double, double>{}, d_fptp.size());
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
// scatter unique negative/positive values
// shift to right by 1 with initial value being 0
dh::LaunchN(d_unique_idx.size(), [=] XGBOOST_DEVICE(size_t i) {
if (d_unique_idx[i] == 0) { // first unique index is 0
assert(i == 0);
d_neg_pos[0] = {0, 0};
return;
}
d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1];
if (i == d_unique_idx.size() - 1) {
// last one needs to be included, may override above assignment if the last
// prediction value is distinct from previous one.
d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1];
return;
}
});
auto in = dh::MakeTransformIterator<double>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
double fp, tp;
double fp_prev, tp_prev;
if (i == 0) {
// handle the last element
thrust::tie(fp, tp) = d_fptp.back();
thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()];
} else {
thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1];
thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]];
}
return area_fn(fp_prev, fp, tp_prev, tp);
});
Pair last = cache->fptp.back();
double auc = thrust::reduce(thrust::cuda::par(alloc), in, in + d_unique_idx.size());
return std::make_tuple(last.first, last.second, auc);
}
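// Worked illustration of the scan in GPUBinaryAUC above, assuming TrapezoidArea(x0, x1, y0, y1)
// is the usual 0.5 * (x1 - x0) * (y0 + y1): for three sorted samples with labels {1, 0, 1},
// unit weights and distinct predictions, the cumulative (FP, TP) pairs are (0,1), (1,1), (1,2),
// the summed trapezoid areas come to 1.0, and the returned triple is (fp=1, tp=2, auc=1.0),
// which the caller presumably normalises to 1.0 / (1 * 2) = 0.5.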
std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
auto &cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
/**
* Create sorted index for each class
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::ArgSort<false>(predts, d_sorted_idx);
// Create a lambda to avoid passing a function pointer.
return GPUBinaryAUC(
predts, info, device, d_sorted_idx,
[] XGBOOST_DEVICE(double x0, double x1, double y0, double y1) -> double {
return TrapezoidArea(x0, x1, y0, y1);
},
cache);
}
void Transpose(common::Span<float const> in, common::Span<float> out, size_t m,
size_t n) {
CHECK_EQ(in.size(), out.size());
CHECK_EQ(in.size(), m * n);
dh::LaunchN(in.size(), [=] XGBOOST_DEVICE(size_t i) {
size_t col = i / m;
size_t row = i % m;
size_t idx = row * n + col;
out[i] = in[idx];
});
}
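// Transpose above converts the row-major [n_samples x n_classes] prediction matrix into a
// class-major layout (out[class * m + sample] = in[sample * n + class]) so that each class's
// predictions are contiguous for the segmented sort that follows.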
double ScaleClasses(common::Span<double> results, common::Span<double> local_area,
common::Span<double> tp, common::Span<double> auc, size_t n_classes) {
dh::XGBDeviceAllocator<char> alloc;
if (collective::IsDistributed()) {
int32_t device = dh::CurrentDevice();
CHECK_EQ(dh::CudaGetPointerDevice(results.data()), device);
auto* communicator = collective::Communicator::GetDevice(device);
communicator->AllReduceSum(results.data(), results.size());
}
auto reduce_in = dh::MakeTransformIterator<Pair>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
if (local_area[i] > 0) {
return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]);
}
return thrust::make_pair(std::numeric_limits<double>::quiet_NaN(), 0.0);
});
double tp_sum;
double auc_sum;
thrust::tie(auc_sum, tp_sum) =
thrust::reduce(thrust::cuda::par(alloc), reduce_in, reduce_in + n_classes,
Pair{0.0, 0.0}, PairPlus<double, double>{});
if (tp_sum != 0 && !std::isnan(auc_sum)) {
auc_sum /= tp_sum;
} else {
return std::numeric_limits<double>::quiet_NaN();
}
return auc_sum;
}
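// ScaleClasses above all-reduces the flat result buffer across workers when running
// distributed, then averages the per-class AUCs weighted by each class's tp; a class with
// zero local area contributes NaN, which makes the final metric NaN.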
/**
* Calculate FP/TP for multi-class and PR-AUC ranking. `segment_id` is a function for
* getting class id or group id given scan index.
*/
template <typename Fn>
void SegmentedFPTP(common::Span<Pair> d_fptp, Fn segment_id) {
using Triple = thrust::tuple<uint32_t, double, double>;
// expand to tuple to include idx
auto fptp_it_in = dh::MakeTransformIterator<Triple>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
return thrust::make_tuple(i, d_fptp[i].first, d_fptp[i].second);
});
// shrink down to pair
auto fptp_it_out = thrust::make_transform_output_iterator(
dh::TypedDiscard<Triple>{}, [d_fptp] XGBOOST_DEVICE(Triple const &t) {
d_fptp[thrust::get<0>(t)] =
thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t));
return t;
});
dh::InclusiveScan(
fptp_it_in, fptp_it_out,
[=] XGBOOST_DEVICE(Triple const &l, Triple const &r) {
uint32_t l_gid = segment_id(thrust::get<0>(l));
uint32_t r_gid = segment_id(thrust::get<0>(r));
if (l_gid != r_gid) {
return r;
}
return Triple(thrust::get<0>(r),
thrust::get<1>(l) + thrust::get<1>(r), // fp
thrust::get<2>(l) + thrust::get<2>(r)); // tp
},
d_fptp.size());
}
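// SegmentedFPTP above is a segmented prefix sum built from a plain InclusiveScan: the scan
// operator keeps accumulating (FP, TP) while both operands share a segment id and restarts
// from the right-hand operand at every segment boundary.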
/**
* Reduce the values of AUC for each group/class.
*/
template <typename Area, typename Seg>
void SegmentedReduceAUC(common::Span<size_t const> d_unique_idx,
common::Span<uint32_t const> d_class_ptr,
common::Span<uint32_t const> d_unique_class_ptr,
std::shared_ptr<DeviceAUCCache> cache,
Area area_fn,
Seg segment_id,
common::Span<double> d_auc) {
auto d_fptp = dh::ToSpan(cache->fptp);
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
dh::XGBDeviceAllocator<char> alloc;
auto key_in = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
size_t class_id = segment_id(d_unique_idx[i]);
return class_id;
});
auto val_in = dh::MakeTransformIterator<double>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
size_t class_id = segment_id(d_unique_idx[i]);
double fp, tp, fp_prev, tp_prev;
if (i == d_unique_class_ptr[class_id]) {
// first item is ignored, we use this thread to calculate the last item
thrust::tie(fp, tp) = d_fptp[common::LastOf(class_id, d_class_ptr)];
thrust::tie(fp_prev, tp_prev) =
d_neg_pos[d_unique_idx[common::LastOf(class_id, d_unique_class_ptr)]];
} else {
thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1];
thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]];
}
double auc = area_fn(fp_prev, fp, tp_prev, tp, class_id);
return auc;
});
thrust::reduce_by_key(thrust::cuda::par(alloc), key_in,
key_in + d_unique_idx.size(), val_in,
thrust::make_discard_iterator(), dh::tbegin(d_auc));
}
/**
* MultiClass implementation is similar to binary classification, except we need to split
* up each class in all kernels.
*/
template <bool scale, typename Fn>
double GPUMultiClassAUCOVR(MetaInfo const &info, int32_t device, common::Span<uint32_t> d_class_ptr,
size_t n_classes, std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
dh::safe_cuda(cudaSetDevice(device));
/**
* Sorted idx
*/
auto d_predts_t = dh::ToSpan(cache->predts_t);
// Index is sorted within class.
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
auto labels = info.labels.View(device);
auto weights = info.weights_.ConstDeviceSpan();
size_t n_samples = labels.Shape(0);
if (n_samples == 0) {
dh::TemporaryArray<double> results(n_classes * 4, 0.0f);
auto d_results = dh::ToSpan(results);
dh::LaunchN(n_classes * 4,
[=] XGBOOST_DEVICE(size_t i) { d_results[i] = 0.0f; });
auto local_area = d_results.subspan(0, n_classes);
auto tp = d_results.subspan(2 * n_classes, n_classes);
auto auc = d_results.subspan(3 * n_classes, n_classes);
return ScaleClasses(d_results, local_area, tp, auc, n_classes);
}
/**
* Linear scan
*/
dh::caching_device_vector<double> d_auc(n_classes, 0);
auto get_weight = common::OptionalWeights{weights};
auto d_fptp = dh::ToSpan(cache->fptp);
auto get_fp_tp = [=]XGBOOST_DEVICE(size_t i) {
size_t idx = d_sorted_idx[i];
size_t class_id = i / n_samples;
// labels is a vector of size n_samples.
float label = labels(idx % n_samples) == class_id;
float w = get_weight[d_sorted_idx[i] % n_samples];
float fp = (1.0 - label) * w;
float tp = label * w;
return thrust::make_pair(fp, tp);
}; // NOLINT
dh::LaunchN(d_sorted_idx.size(),
[=] XGBOOST_DEVICE(size_t i) { d_fptp[i] = get_fp_tp(i); });
/**
* Handle duplicated predictions
*/
dh::XGBDeviceAllocator<char> alloc;
auto d_unique_idx = dh::ToSpan(cache->unique_idx);
dh::Iota(d_unique_idx);
auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
uint32_t class_id = i / n_samples;
float predt = d_predts_t[d_sorted_idx[i]];
return thrust::make_pair(class_id, predt);
});
// unique values are sparse, so we need a CSR style indptr
dh::TemporaryArray<uint32_t> unique_class_ptr(d_class_ptr.size());
auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr);
auto n_uniques = dh::SegmentedUniqueByKey(
thrust::cuda::par(alloc),
dh::tbegin(d_class_ptr),
dh::tend(d_class_ptr),
uni_key,
uni_key + d_sorted_idx.size(),
dh::tbegin(d_unique_idx),
d_unique_class_ptr.data(),
dh::tbegin(d_unique_idx),
thrust::equal_to<thrust::pair<uint32_t, float>>{});
d_unique_idx = d_unique_idx.subspan(0, n_uniques);
auto get_class_id = [=] XGBOOST_DEVICE(size_t idx) { return idx / n_samples; };
SegmentedFPTP(d_fptp, get_class_id);
// scatter unique FP_PREV/TP_PREV values
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
// When dataset is not empty, each class must have at least 1 (unique) sample
// prediction, so no need to handle special case.
dh::LaunchN(d_unique_idx.size(), [=] XGBOOST_DEVICE(size_t i) {
if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0
assert(d_unique_idx[i] % n_samples == 0);
d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i
return;
}
uint32_t class_id = d_unique_idx[i] / n_samples;
d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1];
if (i == common::LastOf(class_id, d_unique_class_ptr)) {
// last one needs to be included.
size_t last = d_unique_idx[common::LastOf(class_id, d_unique_class_ptr)];
d_neg_pos[common::LastOf(class_id, d_class_ptr)] = d_fptp[last - 1];
return;
}
});
/**
* Reduce the result for each class
*/
auto s_d_auc = dh::ToSpan(d_auc);
SegmentedReduceAUC(d_unique_idx, d_class_ptr, d_unique_class_ptr, cache,
area_fn, get_class_id, s_d_auc);
/**
* Scale the classes with number of samples for each class.
*/
dh::TemporaryArray<double> results(n_classes * 4);
auto d_results = dh::ToSpan(results);
auto local_area = d_results.subspan(0, n_classes);
auto fp = d_results.subspan(n_classes, n_classes);
auto tp = d_results.subspan(2 * n_classes, n_classes);
auto auc = d_results.subspan(3 * n_classes, n_classes);
dh::LaunchN(n_classes, [=] XGBOOST_DEVICE(size_t c) {
auc[c] = s_d_auc[c];
auto last = d_fptp[n_samples * c + (n_samples - 1)];
fp[c] = last.first;
if (scale) {
local_area[c] = last.first * last.second;
tp[c] = last.second;
} else {
local_area[c] = 1.0f;
tp[c] = 1.0f;
}
});
return ScaleClasses(d_results, local_area, tp, auc, n_classes);
}
void MultiClassSortedIdx(common::Span<float const> predts,
common::Span<uint32_t> d_class_ptr,
std::shared_ptr<DeviceAUCCache> cache) {
size_t n_classes = d_class_ptr.size() - 1;
auto d_predts_t = dh::ToSpan(cache->predts_t);
auto n_samples = d_predts_t.size() / n_classes;
if (n_samples == 0) {
return;
}
Transpose(predts, d_predts_t, n_samples, n_classes);
dh::LaunchN(n_classes + 1,
[=] XGBOOST_DEVICE(size_t i) { d_class_ptr[i] = i * n_samples; });
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx);
}
double GPUMultiClassROCAUC(common::Span<float const> predts, MetaInfo const &info,
std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
std::size_t n_classes) {
auto& cache = *p_cache;
InitCacheOnce<true>(predts, p_cache);
/**
* Create sorted index for each class
*/
dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0);
MultiClassSortedIdx(predts, dh::ToSpan(class_ptr), cache);
auto fn = [] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp, size_t /*class_id*/) {
return TrapezoidArea(fp_prev, fp, tp_prev, tp);
};
return GPUMultiClassAUCOVR<true>(info, device, dh::ToSpan(class_ptr), n_classes, cache, fn);
}
namespace {
struct RankScanItem {
size_t idx;
double predt;
double w;
bst_group_t group_id;
};
} // anonymous namespace
std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
auto& cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_);
dh::XGBCachingDeviceAllocator<char> alloc;
auto d_group_ptr = dh::ToSpan(group_ptr);
/**
* Validate the dataset
*/
auto check_it = dh::MakeTransformIterator<size_t>(
thrust::make_counting_iterator(0),
[=] XGBOOST_DEVICE(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; });
size_t n_valid = thrust::count_if(
thrust::cuda::par(alloc), check_it, check_it + group_ptr.size() - 1,
[=] XGBOOST_DEVICE(size_t len) { return len >= 3; });
if (n_valid < info.group_ptr_.size() - 1) {
InvalidGroupAUC();
}
if (n_valid == 0) {
return std::make_pair(0.0, 0);
}
/**
* Sort the labels
*/
auto d_labels = info.labels.View(device);
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::SegmentedArgSort<false>(d_labels.Values(), d_group_ptr, d_sorted_idx);
auto d_weights = info.weights_.ConstDeviceSpan();
dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0);
auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr);
// Use max to represent triangle
auto n_threads = common::SegmentedTrapezoidThreads(
d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max());
CHECK_LT(n_threads, std::numeric_limits<int32_t>::max());
// get the coordinate in nested summation
auto get_i_j = [=]XGBOOST_DEVICE(size_t idx, size_t query_group_idx) {
auto data_group_begin = d_group_ptr[query_group_idx];
size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin;
auto thread_group_begin = d_threads_group_ptr[query_group_idx];
auto idx_in_thread_group = idx - thread_group_begin;
size_t i, j;
common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j);
// we use global index among all groups for sorted idx, so i, j should also be global
// index.
i += data_group_begin;
j += data_group_begin;
return thrust::make_pair(i, j);
}; // NOLINT
auto in = dh::MakeTransformIterator<RankScanItem>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t idx) {
bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx);
auto data_group_begin = d_group_ptr[query_group_idx];
size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin;
if (n_samples < 3) {
// at least 3 documents are required.
return RankScanItem{idx, 0, 0, query_group_idx};
}
size_t i, j;
thrust::tie(i, j) = get_i_j(idx, query_group_idx);
float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]];
float w = common::Sqr(d_weights.empty() ? 1.0f : d_weights[query_group_idx]);
if (predt > 0) {
predt = 1.0;
} else if (predt == 0) {
predt = 0.5;
} else {
predt = 0;
}
predt *= w;
return RankScanItem{idx, predt, w, query_group_idx};
});
dh::TemporaryArray<double> d_auc(group_ptr.size() - 1);
auto s_d_auc = dh::ToSpan(d_auc);
auto out = thrust::make_transform_output_iterator(
dh::TypedDiscard<RankScanItem>{},
[=] XGBOOST_DEVICE(RankScanItem const &item) -> RankScanItem {
auto group_id = item.group_id;
assert(group_id < d_group_ptr.size());
auto data_group_begin = d_group_ptr[group_id];
size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin;
// last item of current group
if (item.idx == common::LastOf(group_id, d_threads_group_ptr)) {
if (item.w > 0) {
s_d_auc[group_id] = item.predt / item.w;
} else {
s_d_auc[group_id] = 0;
}
}
return {}; // discard
});
dh::InclusiveScan(
in, out,
[] XGBOOST_DEVICE(RankScanItem const &l, RankScanItem const &r) {
if (l.group_id != r.group_id) {
return r;
}
return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id};
},
n_threads);
/**
* Scale the AUC with number of items in each group.
*/
double auc = thrust::reduce(thrust::cuda::par(alloc), dh::tbegin(s_d_auc),
dh::tend(s_d_auc), 0.0);
return std::make_pair(auc, n_valid);
}
std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
auto& cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
/**
* Create sorted index for each class
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::ArgSort<false>(predts, d_sorted_idx);
auto labels = info.labels.View(device);
auto d_weights = info.weights_.ConstDeviceSpan();
auto get_weight = common::OptionalWeights{d_weights};
auto it = dh::MakeTransformIterator<Pair>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
auto w = get_weight[d_sorted_idx[i]];
return thrust::make_pair(labels(d_sorted_idx[i]) * w,
(1.0f - labels(d_sorted_idx[i])) * w);
});
dh::XGBCachingDeviceAllocator<char> alloc;
double total_pos, total_neg;
thrust::tie(total_pos, total_neg) =
thrust::reduce(thrust::cuda::par(alloc), it, it + labels.Size(),
Pair{0.0, 0.0}, PairPlus<double, double>{});
if (total_pos <= 0.0 || total_neg <= 0.0) {
return {0.0f, 0.0f, 0.0f};
}
auto fn = [total_pos] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp) {
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp, total_pos);
};
double fp, tp, auc;
std::tie(fp, tp, auc) = GPUBinaryAUC(predts, info, device, d_sorted_idx, fn, cache);
return std::make_tuple(1.0, 1.0, auc);
}
double GPUMultiClassPRAUC(common::Span<float const> predts, MetaInfo const &info,
std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
std::size_t n_classes) {
auto& cache = *p_cache;
InitCacheOnce<true>(predts, p_cache);
/**
* Create sorted index for each class
*/
dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0);
auto d_class_ptr = dh::ToSpan(class_ptr);
MultiClassSortedIdx(predts, d_class_ptr, cache);
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
auto d_weights = info.weights_.ConstDeviceSpan();
/**
* Get total positive/negative
*/
auto labels = info.labels.View(device);
auto n_samples = info.num_row_;
dh::caching_device_vector<Pair> totals(n_classes);
auto key_it =
dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0ul),
[n_samples] XGBOOST_DEVICE(size_t i) {
return i / n_samples; // class id
});
auto get_weight = common::OptionalWeights{d_weights};
auto val_it = dh::MakeTransformIterator<thrust::pair<double, double>>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
auto idx = d_sorted_idx[i] % n_samples;
auto w = get_weight[idx];
auto class_id = i / n_samples;
auto y = labels(idx) == class_id;
return thrust::make_pair(y * w, (1.0f - y) * w);
});
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::reduce_by_key(thrust::cuda::par(alloc), key_it,
key_it + predts.size(), val_it,
thrust::make_discard_iterator(), totals.begin(),
thrust::equal_to<size_t>{}, PairPlus<double, double>{});
/**
* Calculate AUC
*/
auto d_totals = dh::ToSpan(totals);
auto fn = [d_totals] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp, size_t class_id) {
auto total_pos = d_totals[class_id].first;
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
d_totals[class_id].first);
};
return GPUMultiClassAUCOVR<false>(info, device, d_class_ptr, n_classes, cache, fn);
}
template <typename Fn>
std::pair<double, uint32_t>
GPURankingPRAUCImpl(common::Span<float const> predts, MetaInfo const &info,
common::Span<uint32_t> d_group_ptr, int32_t device,
std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
/**
* Sorted idx
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
auto labels = info.labels.View(device);
auto weights = info.weights_.ConstDeviceSpan();
uint32_t n_groups = static_cast<uint32_t>(info.group_ptr_.size() - 1);
/**
* Linear scan
*/
size_t n_samples = labels.Shape(0);
dh::caching_device_vector<double> d_auc(n_groups, 0);
auto get_weight = common::OptionalWeights{weights};
auto d_fptp = dh::ToSpan(cache->fptp);
auto get_fp_tp = [=] XGBOOST_DEVICE(size_t i) {
size_t idx = d_sorted_idx[i];
size_t group_id = dh::SegmentId(d_group_ptr, idx);
float label = labels(idx);
float w = get_weight[group_id];
float fp = (1.0 - label) * w;
float tp = label * w;
return thrust::make_pair(fp, tp);
}; // NOLINT
dh::LaunchN(d_sorted_idx.size(),
[=] XGBOOST_DEVICE(size_t i) { d_fptp[i] = get_fp_tp(i); });
/**
* Handle duplicated predictions
*/
dh::XGBDeviceAllocator<char> alloc;
auto d_unique_idx = dh::ToSpan(cache->unique_idx);
dh::Iota(d_unique_idx);
auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>(
thrust::make_counting_iterator(0), [=] XGBOOST_DEVICE(size_t i) {
auto idx = d_sorted_idx[i];
bst_group_t group_id = dh::SegmentId(d_group_ptr, idx);
float predt = predts[idx];
return thrust::make_pair(group_id, predt);
});
// unique values are sparse, so we need a CSR style indptr
dh::TemporaryArray<uint32_t> unique_class_ptr(d_group_ptr.size());
auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr);
auto n_uniques = dh::SegmentedUniqueByKey(
thrust::cuda::par(alloc),
dh::tbegin(d_group_ptr),
dh::tend(d_group_ptr),
uni_key,
uni_key + d_sorted_idx.size(),
dh::tbegin(d_unique_idx),
d_unique_class_ptr.data(),
dh::tbegin(d_unique_idx),
thrust::equal_to<thrust::pair<uint32_t, float>>{});
d_unique_idx = d_unique_idx.subspan(0, n_uniques);
auto get_group_id = [=] XGBOOST_DEVICE(size_t idx) {
return dh::SegmentId(d_group_ptr, idx);
};
SegmentedFPTP(d_fptp, get_group_id);
// scatter unique FP_PREV/TP_PREV values
auto d_neg_pos = dh::ToSpan(cache->neg_pos);
dh::LaunchN(d_unique_idx.size(), [=] XGBOOST_DEVICE(size_t i) {
if (thrust::binary_search(thrust::seq, d_unique_class_ptr.cbegin(),
d_unique_class_ptr.cend(),
i)) { // first unique index is 0
d_neg_pos[d_unique_idx[i]] = {0, 0};
return;
}
auto group_idx = dh::SegmentId(d_group_ptr, d_unique_idx[i]);
d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1];
if (i == common::LastOf(group_idx, d_unique_class_ptr)) {
// last one needs to be included.
size_t last = d_unique_idx[common::LastOf(group_idx, d_unique_class_ptr)];
d_neg_pos[common::LastOf(group_idx, d_group_ptr)] = d_fptp[last - 1];
return;
}
});
/**
* Reduce the result for each group
*/
auto s_d_auc = dh::ToSpan(d_auc);
SegmentedReduceAUC(d_unique_idx, d_group_ptr, d_unique_class_ptr, cache,
area_fn, get_group_id, s_d_auc);
/**
* Scale the groups with number of samples for each group.
*/
double auc;
uint32_t invalid_groups;
{
auto it = dh::MakeTransformIterator<thrust::pair<double, uint32_t>>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t g) {
double fp, tp;
thrust::tie(fp, tp) = d_fptp[common::LastOf(g, d_group_ptr)];
double area = fp * tp;
auto n_documents = d_group_ptr[g + 1] - d_group_ptr[g];
if (area > 0 && n_documents >= 2) {
return thrust::make_pair(s_d_auc[g], static_cast<uint32_t>(0));
}
return thrust::make_pair(0.0, static_cast<uint32_t>(1));
});
thrust::tie(auc, invalid_groups) = thrust::reduce(
thrust::cuda::par(alloc), it, it + n_groups,
thrust::pair<double, uint32_t>(0.0, 0), PairPlus<double, uint32_t>{});
}
return std::make_pair(auc, n_groups - invalid_groups);
}
std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const> predts,
MetaInfo const &info, std::int32_t device,
std::shared_ptr<DeviceAUCCache> *p_cache) {
dh::safe_cuda(cudaSetDevice(device));
if (predts.empty()) {
return std::make_pair(0.0, static_cast<uint32_t>(0));
}
auto &cache = *p_cache;
InitCacheOnce<false>(predts, p_cache);
dh::device_vector<bst_group_t> group_ptr(info.group_ptr_.size());
thrust::copy(info.group_ptr_.begin(), info.group_ptr_.end(), group_ptr.begin());
auto d_group_ptr = dh::ToSpan(group_ptr);
CHECK_GE(info.group_ptr_.size(), 1) << "Must have at least 1 query group for LTR.";
size_t n_groups = info.group_ptr_.size() - 1;
/**
* Create sorted index for each group
*/
auto d_sorted_idx = dh::ToSpan(cache->sorted_idx);
dh::SegmentedArgSort<false>(predts, d_group_ptr, d_sorted_idx);
dh::XGBDeviceAllocator<char> alloc;
auto labels = info.labels.View(device);
if (thrust::any_of(thrust::cuda::par(alloc), dh::tbegin(labels.Values()),
dh::tend(labels.Values()), PRAUCLabelInvalid{})) {
InvalidLabels();
}
/**
* Get total positive/negative for each group.
*/
auto d_weights = info.weights_.ConstDeviceSpan();
dh::caching_device_vector<thrust::pair<double, double>> totals(n_groups);
auto key_it = dh::MakeTransformIterator<size_t>(
thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(size_t i) { return dh::SegmentId(d_group_ptr, i); });
auto val_it = dh::MakeTransformIterator<Pair>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
float w = 1.0f;
if (!d_weights.empty()) {
// Avoid a binary search if the groups are not weighted.
auto g = dh::SegmentId(d_group_ptr, i);
w = d_weights[g];
}
auto y = labels(i);
return thrust::make_pair(y * w, (1.0 - y) * w);
});
thrust::reduce_by_key(thrust::cuda::par(alloc), key_it,
key_it + predts.size(), val_it,
thrust::make_discard_iterator(), totals.begin(),
thrust::equal_to<size_t>{}, PairPlus<double, double>{});
/**
* Calculate AUC
*/
auto d_totals = dh::ToSpan(totals);
auto fn = [d_totals] XGBOOST_DEVICE(double fp_prev, double fp, double tp_prev,
double tp, size_t group_id) {
auto total_pos = d_totals[group_id].first;
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
d_totals[group_id].first);
};
return GPURankingPRAUCImpl(predts, info, d_group_ptr, device, cache, fn);
}
} // namespace metric
} // namespace xgboost
|
1560217cd1485350dbb2ec0971bd82dbae3ac515.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2016, Xuhao Chen
#define BC_VARIANT "topo_base"
#include "bc.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include "worklistc.h"
#include "timer.h"
#include <vector>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
__global__ void initialize(int m, int source, ScoreT *scores, int *path_counts, int *depths, ScoreT *deltas, bool *visited, bool *expanded) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
scores[id] = 0;
deltas[id] = 0;
expanded[id] = false;
if(id == source) {
visited[id] = true;
path_counts[id] = 1;
depths[id] = 0;
} else {
visited[id] = false;
path_counts[id] = 0;
depths[id] = -1;
}
}
}
// Shortest path calculation by forward BFS
__global__ void bc_forward(int m, int *row_offsets, int *column_indices, int *path_counts, int *depths, int depth, bool *changed, bool *visited, bool *expanded, int *nitems, int *queue, int queue_len) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//int total_inputs = (m - 1) / (gridDim.x * blockDim.x) + 1;
//for (int src = tid; total_inputs > 0; src += blockDim.x * gridDim.x, total_inputs--) {
int src = tid;
if(src < m && visited[src] && !expanded[src]) {
expanded[src] = true;
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
//int depth = depths[src] + 1;
if ((depths[dst] == -1) && (atomicCAS(&depths[dst], -1, depth)==-1)) {
int pos = atomicAdd(nitems, 1);
queue[queue_len + pos] = dst;
*changed = true;
}
if (depths[dst] == depth) {
atomicAdd(&path_counts[dst], path_counts[src]);
}
}
}
}
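// bc_forward above performs one topology-driven BFS level: every visited-but-unexpanded vertex
// relaxes its out-edges, claims unvisited neighbours by an atomicCAS on their depth, appends
// newly discovered vertices to the next frontier, and accumulates shortest-path counts with
// atomicAdd.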
// Dependency accumulation by back propagation
__global__ void bc_reverse(int num, int *row_offsets, int *column_indices, int start, int *frontiers, ScoreT *scores, int *path_counts, int *depths, int depth, ScoreT *deltas) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num) {
int src = frontiers[start + id];
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
ScoreT delta_src = 0;
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
//if(depths[dst] == depths[src] + 1) {
if(depths[dst] == depth + 1) {
delta_src += static_cast<ScoreT>(path_counts[src]) /
static_cast<ScoreT>(path_counts[dst]) * (1 + deltas[dst]);
}
}
deltas[src] = delta_src;
scores[src] += deltas[src];
}
}
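// bc_reverse above is Brandes' dependency accumulation over the frontier of a given depth:
// delta(v) = sum over successors w of (sigma(v) / sigma(w)) * (1 + delta(w)), where sigma is
// the shortest-path count (path_counts); each vertex then adds its delta to its score.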
__global__ void bc_update(int m, int *depths, bool *visited) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
if(depths[id] != -1 && !visited[id])
visited[id] = true;
}
}
__global__ void bc_normalize(int m, ScoreT *scores, ScoreT max_score) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < m) {
scores[tid] = scores[tid] / (max_score);
}
}
void BCSolver(int m, int nnz, int source, int *h_row_offsets, int *h_column_indices, ScoreT *h_scores) {
//print_device_info(0);
int zero = 0;
int *d_row_offsets, *d_column_indices;
CUDA_SAFE_CALL(hipMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_column_indices, nnz * sizeof(int)));
CUDA_SAFE_CALL(hipMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice));
ScoreT *d_scores, *d_deltas;
CUDA_SAFE_CALL(hipMalloc((void **)&d_scores, sizeof(ScoreT) * m));
CUDA_SAFE_CALL(hipMalloc((void **)&d_deltas, sizeof(ScoreT) * m));
int *d_path_counts, *d_depths, *d_frontiers;
CUDA_SAFE_CALL(hipMalloc((void **)&d_path_counts, sizeof(int) * m));
CUDA_SAFE_CALL(hipMalloc((void **)&d_depths, sizeof(int) * m));
CUDA_SAFE_CALL(hipMalloc((void **)&d_frontiers, sizeof(int) * (m+1)));
bool *d_changed, h_changed, *d_visited, *d_expanded;
CUDA_SAFE_CALL(hipMalloc((void **)&d_changed, sizeof(bool)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_visited, m * sizeof(bool)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_expanded, m * sizeof(bool)));
int *d_nitems, h_nitems = 1;
CUDA_SAFE_CALL(hipMalloc((void **)&d_nitems, sizeof(int)));
int depth = 0;
vector<int> depth_index;
int nthreads = BLOCK_SIZE;
int nblocks = (m - 1) / nthreads + 1;
hipLaunchKernelGGL(initialize, dim3(nblocks), dim3(nthreads), 0, 0, m, source, d_scores, d_path_counts, d_depths, d_deltas, d_visited, d_expanded);
CudaTest("initializing failed");
CUDA_SAFE_CALL(hipMemcpy(&d_frontiers[0], &source, sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipDeviceSynchronize());
int frontiers_len = 0;
int max_blocks = maximum_residency(bc_forward, nthreads, 0);
depth_index.push_back(0);
printf("Launching CUDA BC solver (%d CTAs/SM, %d threads/CTA) ...\n", max_blocks, nthreads);
Timer t;
t.Start();
do {
depth++;
h_changed = false;
//printf("iteration=%d, frontier_size=%d\n", depth, h_nitems);
CUDA_SAFE_CALL(hipMemcpy(d_changed, &h_changed, sizeof(bool), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_nitems, &zero, sizeof(int), hipMemcpyHostToDevice));
frontiers_len += h_nitems;
depth_index.push_back(frontiers_len);
hipLaunchKernelGGL(bc_forward, dim3(nblocks), dim3(nthreads), 0, 0, m, d_row_offsets, d_column_indices, d_path_counts, d_depths, depth, d_changed, d_visited, d_expanded, d_nitems, d_frontiers, frontiers_len);
CudaTest("solving bc_forward failed");
hipLaunchKernelGGL(bc_update, dim3(nblocks), dim3(nthreads), 0, 0, m, d_depths, d_visited);
CudaTest("solving bc_update failed");
CUDA_SAFE_CALL(hipMemcpy(&h_changed, d_changed, sizeof(bool), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(&h_nitems, d_nitems, sizeof(int), hipMemcpyDeviceToHost));
} while (h_changed);
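// Each iteration of the loop above expands one BFS level; depth_index[d]..depth_index[d+1]
// records the frontier of level d inside d_frontiers so that the reverse (dependency
// accumulation) pass below can replay the levels from deepest to shallowest.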
CUDA_SAFE_CALL(hipDeviceSynchronize());
//printf("\nDone Forward BFS, starting back propagation (dependency accumulation)\n");
for (int d = depth_index.size() - 2; d >= 0; d--) {
h_nitems = depth_index[d+1] - depth_index[d];
//thrust::sort(thrust::device, d_frontiers+depth_index[d], d_frontiers+depth_index[d+1]);
nblocks = (h_nitems - 1) / nthreads + 1;
//printf("Reverse: depth=%d, frontier_size=%d\n", d, h_nitems);
hipLaunchKernelGGL(bc_reverse, dim3(nblocks), dim3(nthreads), 0, 0, h_nitems, d_row_offsets, d_column_indices, depth_index[d], d_frontiers, d_scores, d_path_counts, d_depths, d, d_deltas);
CudaTest("solving bc_reverse failed");
}
//CUDA_SAFE_CALL(hipMemcpy(h_scores, d_scores, sizeof(ScoreT) * m, hipMemcpyDeviceToHost));
//printf("\nStart calculating the maximum score\n");
ScoreT *d_max_score;
d_max_score = thrust::max_element(thrust::device, d_scores, d_scores + m);
ScoreT h_max_score;
CUDA_SAFE_CALL(hipMemcpy(&h_max_score, d_max_score, sizeof(ScoreT), hipMemcpyDeviceToHost));
//h_max_score = *max_element(h_scores, h_scores+m);
//for (int n = 0; n < m; n ++) h_scores[n] = h_scores[n] / h_max_score;
//std::cout << "max_score = " << h_max_score << "\n";
//printf("\nStart normalizing scores\n");
nthreads = 512;
nblocks = (m - 1) / nthreads + 1;
hipLaunchKernelGGL(bc_normalize, dim3(nblocks), dim3(nthreads), 0, 0, m, d_scores, h_max_score);
CUDA_SAFE_CALL(hipDeviceSynchronize());
t.Stop();
printf("\truntime [%s] = %f ms.\n", BC_VARIANT, t.Millisecs());
CUDA_SAFE_CALL(hipMemcpy(h_scores, d_scores, sizeof(ScoreT) * m, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_path_counts));
CUDA_SAFE_CALL(hipFree(d_depths));
CUDA_SAFE_CALL(hipFree(d_deltas));
CUDA_SAFE_CALL(hipFree(d_frontiers));
CUDA_SAFE_CALL(hipFree(d_row_offsets));
CUDA_SAFE_CALL(hipFree(d_column_indices));
}
| 1560217cd1485350dbb2ec0971bd82dbae3ac515.cu | // Copyright (c) 2016, Xuhao Chen
#define BC_VARIANT "topo_base"
#include "bc.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include "worklistc.h"
#include "timer.h"
#include <vector>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
__global__ void initialize(int m, int source, ScoreT *scores, int *path_counts, int *depths, ScoreT *deltas, bool *visited, bool *expanded) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
scores[id] = 0;
deltas[id] = 0;
expanded[id] = false;
if(id == source) {
visited[id] = true;
path_counts[id] = 1;
depths[id] = 0;
} else {
visited[id] = false;
path_counts[id] = 0;
depths[id] = -1;
}
}
}
// Shortest path calculation by forward BFS
__global__ void bc_forward(int m, int *row_offsets, int *column_indices, int *path_counts, int *depths, int depth, bool *changed, bool *visited, bool *expanded, int *nitems, int *queue, int queue_len) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//int total_inputs = (m - 1) / (gridDim.x * blockDim.x) + 1;
//for (int src = tid; total_inputs > 0; src += blockDim.x * gridDim.x, total_inputs--) {
int src = tid;
if(src < m && visited[src] && !expanded[src]) {
expanded[src] = true;
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
//int depth = depths[src] + 1;
if ((depths[dst] == -1) && (atomicCAS(&depths[dst], -1, depth)==-1)) {
int pos = atomicAdd(nitems, 1);
queue[queue_len + pos] = dst;
*changed = true;
}
if (depths[dst] == depth) {
atomicAdd(&path_counts[dst], path_counts[src]);
}
}
}
}
// Dependency accumulation by back propagation
__global__ void bc_reverse(int num, int *row_offsets, int *column_indices, int start, int *frontiers, ScoreT *scores, int *path_counts, int *depths, int depth, ScoreT *deltas) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num) {
int src = frontiers[start + id];
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
ScoreT delta_src = 0;
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
//if(depths[dst] == depths[src] + 1) {
if(depths[dst] == depth + 1) {
delta_src += static_cast<ScoreT>(path_counts[src]) /
static_cast<ScoreT>(path_counts[dst]) * (1 + deltas[dst]);
}
}
deltas[src] = delta_src;
scores[src] += deltas[src];
}
}
__global__ void bc_update(int m, int *depths, bool *visited) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
if(depths[id] != -1 && !visited[id])
visited[id] = true;
}
}
__global__ void bc_normalize(int m, ScoreT *scores, ScoreT max_score) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < m) {
scores[tid] = scores[tid] / (max_score);
}
}
void BCSolver(int m, int nnz, int source, int *h_row_offsets, int *h_column_indices, ScoreT *h_scores) {
//print_device_info(0);
int zero = 0;
int *d_row_offsets, *d_column_indices;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_column_indices, nnz * sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice));
ScoreT *d_scores, *d_deltas;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_scores, sizeof(ScoreT) * m));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_deltas, sizeof(ScoreT) * m));
int *d_path_counts, *d_depths, *d_frontiers;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_path_counts, sizeof(int) * m));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_depths, sizeof(int) * m));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_frontiers, sizeof(int) * (m+1)));
bool *d_changed, h_changed, *d_visited, *d_expanded;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_changed, sizeof(bool)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_visited, m * sizeof(bool)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_expanded, m * sizeof(bool)));
int *d_nitems, h_nitems = 1;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_nitems, sizeof(int)));
int depth = 0;
	std::vector<int> depth_index;
int nthreads = BLOCK_SIZE;
int nblocks = (m - 1) / nthreads + 1;
initialize <<<nblocks, nthreads>>> (m, source, d_scores, d_path_counts, d_depths, d_deltas, d_visited, d_expanded);
CudaTest("initializing failed");
CUDA_SAFE_CALL(cudaMemcpy(&d_frontiers[0], &source, sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
int frontiers_len = 0;
int max_blocks = maximum_residency(bc_forward, nthreads, 0);
depth_index.push_back(0);
printf("Launching CUDA BC solver (%d CTAs/SM, %d threads/CTA) ...\n", max_blocks, nthreads);
Timer t;
t.Start();
do {
depth++;
h_changed = false;
//printf("iteration=%d, frontire_size=%d\n", depth, h_nitems);
CUDA_SAFE_CALL(cudaMemcpy(d_changed, &h_changed, sizeof(bool), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_nitems, &zero, sizeof(int), cudaMemcpyHostToDevice));
		// advance past the previous level's vertices; frontiers_len now marks where this level's frontier begins
		frontiers_len += h_nitems;
depth_index.push_back(frontiers_len);
bc_forward<<<nblocks, nthreads>>>(m, d_row_offsets, d_column_indices, d_path_counts, d_depths, depth, d_changed, d_visited, d_expanded, d_nitems, d_frontiers, frontiers_len);
CudaTest("solving bc_forward failed");
bc_update <<<nblocks, nthreads>>> (m, d_depths, d_visited);
CudaTest("solving bc_update failed");
CUDA_SAFE_CALL(cudaMemcpy(&h_changed, d_changed, sizeof(bool), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(&h_nitems, d_nitems, sizeof(int), cudaMemcpyDeviceToHost));
} while (h_changed);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
//printf("\nDone Forward BFS, starting back propagation (dependency accumulation)\n");
for (int d = depth_index.size() - 2; d >= 0; d--) {
h_nitems = depth_index[d+1] - depth_index[d];
//thrust::sort(thrust::device, d_frontiers+depth_index[d], d_frontiers+depth_index[d+1]);
nblocks = (h_nitems - 1) / nthreads + 1;
//printf("Reverse: depth=%d, frontier_size=%d\n", d, h_nitems);
bc_reverse<<<nblocks, nthreads>>>(h_nitems, d_row_offsets, d_column_indices, depth_index[d], d_frontiers, d_scores, d_path_counts, d_depths, d, d_deltas);
CudaTest("solving kernel2 failed");
}
//CUDA_SAFE_CALL(cudaMemcpy(h_scores, d_scores, sizeof(ScoreT) * m, cudaMemcpyDeviceToHost));
//printf("\nStart calculating the maximum score\n");
	// thrust::max_element returns a device pointer to the largest score
	ScoreT *d_max_score;
	d_max_score = thrust::max_element(thrust::device, d_scores, d_scores + m);
ScoreT h_max_score;
CUDA_SAFE_CALL(cudaMemcpy(&h_max_score, d_max_score, sizeof(ScoreT), cudaMemcpyDeviceToHost));
//h_max_score = *max_element(h_scores, h_scores+m);
//for (int n = 0; n < m; n ++) h_scores[n] = h_scores[n] / h_max_score;
//std::cout << "max_score = " << h_max_score << "\n";
//printf("\nStart normalizing scores\n");
nthreads = 512;
nblocks = (m - 1) / nthreads + 1;
bc_normalize<<<nblocks, nthreads>>>(m, d_scores, h_max_score);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
t.Stop();
printf("\truntime [%s] = %f ms.\n", BC_VARIANT, t.Millisecs());
CUDA_SAFE_CALL(cudaMemcpy(h_scores, d_scores, sizeof(ScoreT) * m, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaFree(d_path_counts));
	CUDA_SAFE_CALL(cudaFree(d_depths));
	CUDA_SAFE_CALL(cudaFree(d_deltas));
	CUDA_SAFE_CALL(cudaFree(d_frontiers));
	CUDA_SAFE_CALL(cudaFree(d_row_offsets));
	CUDA_SAFE_CALL(cudaFree(d_column_indices));
	// release the remaining device buffers allocated above
	CUDA_SAFE_CALL(cudaFree(d_scores));
	CUDA_SAFE_CALL(cudaFree(d_changed));
	CUDA_SAFE_CALL(cudaFree(d_visited));
	CUDA_SAFE_CALL(cudaFree(d_expanded));
	CUDA_SAFE_CALL(cudaFree(d_nitems));
}
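// Illustrative usage sketch (not part of the original file; the function name is
// hypothetical and the real benchmark builds the graph elsewhere): runs the solver on
// the small undirected diamond graph 0-1, 0-2, 1-3, 2-3 stored in CSR form, starting
// from vertex 0, then prints the normalized betweenness scores.
void example_bc_usage() {
	const int m = 4, nnz = 8;
	int row_offsets[]    = {0, 2, 4, 6, 8};
	int column_indices[] = {1, 2, 0, 3, 0, 3, 1, 2};
	std::vector<ScoreT> scores(m, 0);
	BCSolver(m, nnz, 0 /*source*/, row_offsets, column_indices, scores.data());
	for (int v = 0; v < m; v++)
		printf("vertex %d: bc score = %f\n", v, (double)scores[v]);
}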