hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
4fe4139fa322c69577d92781042744db3de36116.hip | // !!! This is a file automatically generated by hipify!!!
#include "file_system.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__device__ __managed__ u32 gtime = 0;
/* Utilities */
/* Return number of blocks needed to store nb bytes of data.
* 0 byte needs 1 block to store. */
__device__ u32 bytes2blocks(u32 nb) {
return nb == 0? 1 : (nb - 1) / DATA_BLOCK_SIZE + 1;
}
/* Adapted from: http://tekpool.wordpress.com/category/bit-count/
* Return location of first 1-bit in x, LSB to MSB, return 32 if not found. */
__device__ int first_1bit(int x) {
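// (x & -x) isolates the lowest set bit; subtracting 1 leaves a mask of the bits
// below it. The octal-constant expression below is a SWAR population count of
// that mask, so the result is the index of the lowest set bit (32 when x == 0,
// because the mask is then all ones).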
u32 u = (x & (-x)) - 1;
u32 uCount = u
- ((u >> 1) & 033333333333)
- ((u >> 2) & 011111111111);
return
((uCount + (uCount >> 3))
& 030707070707) % 63;
}
/* Return location of first n consecutive 1-bit's in x, LSB to MSB.
* Return -1 if not found. */
__device__ int first_n_1bit(u32 x, int n) {
int ones = 0;
int count = 0;
for (; x != 0; x >>= 1, count++) {
ones = (x & 1)? ones+1 : 0;
if (ones >= n)
return count - n + 1;
}
return -1;
}
/* https://stackoverflow.com/questions/50196897/creating-a-bit-
* mask-for-relevant-bits-over-multiple-bytes-programmatically */
__device__ u32 bitmask(int start, int length) {
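// Note: length must be at least 1 (a shift by 32 below is undefined), and the
// run must fit inside one 32-bit superblock word, i.e. start + length <= 32.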
u32 mask = 0xffffffff;
mask >>= 32 - length;
mask <<= start;
return mask;
}
__device__ char* my_strcpy(char* dest, const char* src) {
char *save = dest;
while (*dest++ = *src++);
return save;
}
__device__ int my_strcmp(const char* s1, const char* s2) {
uchar c1, c2;
do {
c1 = *s1++;
c2 = *s2++;
if (c1 == '\0')
return c1 - c2;
} while (c1 == c2);
return c1 - c2;
}
__device__ void swap(int* x, int* y) {
int temp = *x;
*x = *y;
*y = temp;
}
__device__ void print_centered(const char *str, int width, const char pad) {
char* padded = new char[width];
memset(padded, pad, width);
const char* s;
for (s = str; *s; ++s);
int strLen = s - str;
int padLen = (width - strLen) / 2;
memcpy(&padded[padLen], str, strLen);
printf("%s\n", padded);
delete[] padded;
}
/* File System */
/* Linearly scan the files array and return the first uninitialized fp.
* Return `EMPTY` if all `files` entries are already in use. */
__device__ u32 fs_find_empty_fp(FileSystem* fs) {
for (auto fp = 0; fp < FCB_ENTRIES; ++fp) {
if (fs->files[fp].starting_block == EMPTY)
return fp;
} return EMPTY;
}
__device__ u32 fs_find_name(FileSystem* fs, const char fname[]) {
u32 fp, fcount;
for (fp = fcount = 0; fp < FCB_ENTRIES && fcount < fs->nfiles; ++fp) {
if (fs->files[fp].starting_block != EMPTY) {
++fcount;
if (my_strcmp(fs->files[fp].fname, fname) == 0)
return fp;
}
}
return EMPTY;
}
__device__ void fs_init(FileSystem *fs) {
/* init all blocks status as empty (1) */
memset(fs->super, EMPTY, sizeof(fs->super));
/* init disk */
memset(fs->data, 0, sizeof(fs->data));
/* init FCB */
fs->nfiles = 0;
for (int i = 0; i < FCB_ENTRIES; ++i)
fs->files[i].starting_block = EMPTY;
}
__device__ u32 fs_open(FileSystem* fs, const char fname[], int op) {
u32 fp;
if ( (fp = fs_find_name(fs, fname)) != EMPTY ) {
printf("[fs_open] : File \"%s\" opened, fp: %d\n", fname, fp);
fs->files[fp].btime = ++gtime;
return fp;
}
// file doesn't exist. create new one.
printf("[fs_open] : Creating file \"%s\"\n", fname);
// error if number of files reached max
if (fs->nfiles >= FCB_ENTRIES) {
printf("[fs_open] : Failed. Number of files (%d) reached maximum!\n", FCB_ENTRIES);
return EMPTY;
}
// find empty fp
fp = fs_find_empty_fp(fs);
// find empty block for new file
int block_offset;
for (auto i = 0; i < N_SUPERBLOCKS; ++i) {
if ( (block_offset = first_1bit(fs->super[i])) < 32 ) {
my_strcpy(fs->files[fp].fname, fname);
fs->files[fp].starting_block = i * 32 + block_offset;
fs->files[fp].fsize = 0;
fs->files[fp].btime = ++gtime;
fs->super[i] ^= (1 << block_offset);
++fs->nfiles;
return fp;
}
}
// cannot find empty block
printf("[fs_open] : Failed. Cannot find any empty block!\n");
return EMPTY;
}
__device__ void fs_read(FileSystem *fs, uchar* output, u32 size, u32 fp) {
if (fs->files[fp].starting_block == EMPTY)
printf("[fs_read] : fp %d does not exist.\n", fp);
else {
size = (size <= fs->files[fp].fsize)? size : fs->files[fp].fsize;
auto starting_byte = fs->files[fp].starting_block * DATA_BLOCK_SIZE;
memcpy(output, &fs->data[starting_byte], size);
printf("[fs_read] : %d bytes of file \"%s\" read to output buffer.\n", size, fs->files[fp].fname);
fs->files[fp].btime = ++gtime;
}
}
// Assume fp exists.
__device__ void fs_write(FileSystem *fs, uchar* input, u32 size, u32 fp) {
if (size > MAX_FILE_SIZE) {
printf("[fs_write] : File size limit exceeded (%d > %d)",
size, MAX_FILE_SIZE);
return;
}
// old starting block
auto starting_block = fs->files[fp].starting_block;
// old number of blocks occupied
auto old_blocks = bytes2blocks(fs->files[fp].fsize);
// new number of blocks required
auto new_blocks = bytes2blocks(size);
// free old blocks temporarily.
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, old_blocks);
// need to find larger unoccupied blocks to write
if (new_blocks > old_blocks) {
int offset;
for (int i = 0; i < N_SUPERBLOCKS; ++i) {
offset = first_n_1bit(fs->super[i], new_blocks);
if (offset != -1) {
starting_block = i * 32 + offset;
break;
}
}
// failed to find larger unoccupied blocks, resume super status and return.
if (offset == -1) {
printf("[fs_write] : Failed to write to fp %d. Not enough data blocks!\n", fp);
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, old_blocks);
return;
}
}
// found suitable starting block to write, rewrite data
printf("[fs_write] : %d bytes written to file \"%s\"!\n", size, fs->files[fp].fname);
memcpy(&fs->data[starting_block * DATA_BLOCK_SIZE], input, size);
// update file info
fs->files[fp].starting_block = starting_block;
fs->files[fp].fsize = size;
fs->files[fp].btime = ++gtime;
// update fs: flip occupancy bits of new blocks and increment file counter.
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, new_blocks);
}
__device__ void fp_bubble_sort(FileSystem* fs, int* fp, int n,
int(*cmp)(const File*, const File*)) {
auto* files = fs->files;
bool swapped;
for (int i = 0; i < n - 1; ++i) {
swapped = false;
for (int j = 0; j < n - i - 1; ++j) {
auto* l = &files[fp[j]];
auto* r = &files[fp[j+1]];
if (cmp(l, r) > 0 || (cmp(l, r) == 0 && l->btime < r->btime)) {
swap(&fp[j], &fp[j+1]);
swapped = true;
}
}
if (!swapped) return;
}
}
__device__ void fs_gsys(FileSystem* fs, int op) {
/* Operations encoded by following op's
* LS_D: list files by Time (latest first)
* LS_S: list files by Size (largest first)
* LS_N: list files by Name (lexical order)
* LS_F: list files by fp (smallest first) */
auto fcount = 0;
int nfiles = fs->nfiles;
printf("\n%d file%c\t\tGtime: %d\n",
nfiles, nfiles > 1? 's' : '\0', gtime);
// obtain array of all current fp
int* fp = new int[nfiles];
for (int i = 0; i < FCB_ENTRIES && fcount < nfiles; ++i) {
if (fs->files[i].starting_block != EMPTY)
fp[fcount++] = i;
}
// bubble sort the fp array according to specified op
switch (op) {
case LS_D:
print_centered("Sort by Time", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name", "fp", "Size", "Time*", "Blocks");
fp_bubble_sort(fs, fp, nfiles, [](auto* f1, auto* f2) {
return int(f2->btime - f1->btime);});
break;
case LS_S:
print_centered("Sort by Size", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name", "fp", "Size*", "Time", "Blocks");
fp_bubble_sort(fs, fp, nfiles, [](auto* f1, auto* f2) {
return int(f2->fsize - f1->fsize);});
break;
case LS_N:
print_centered("Sort by Name", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name*", "fp", "Size", "Time", "Blocks");
fp_bubble_sort(fs, fp, nfiles, [](auto* f1, auto* f2) {
return my_strcmp(f1->fname, f2->fname);});
break;
case LS_F:
print_centered("Sort by fp", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name", "fp*", "Size", "Time", "Blocks");
break;
default:
printf("[fs_gsys] : Invalid operation! (%d)\n", op);
delete[] fp; return;
}
File* file;
for (int i = 0; i < nfiles; ++i) {
file = &fs->files[fp[i]];
printf("%-20s %-10d %-10d %-10d %d-%d\n",
file->fname, fp[i],
file->fsize,
file->btime,
file->starting_block,
file->starting_block
+ bytes2blocks(file->fsize) - 1);
}
printf("\n");
delete[] fp;
}
__device__ void fs_gsys(FileSystem* fs, int op, const char fname[]) {
/* Operations encoded by following op's
* RM: remove a file */
switch (op) {
case RM:
u32 fp;
if ( (fp = fs_find_name(fs, fname)) != EMPTY ) {
printf("[fs_gsys] : File \"%s\" removed!\n", fname);
auto starting_block = fs->files[fp].starting_block;
auto blocks = bytes2blocks(fs->files[fp].fsize);
// deactivate fp
fs->files[fp].starting_block = EMPTY;
// update fs: free blocks in super and decrement file counter
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, blocks);
fs->nfiles--;
} else // file doesn't exist.
printf("[fs_gsys] : file to remove \"%s\" doesn't exist!\n", fname);
break;
default:
printf("[fs_gsys] : Invalid operation! (%d)\n", op);
return;
}
}
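/* A minimal, hypothetical usage sketch (not part of the original file): it assumes
 * FileSystem, u32, uchar and the LS_D/RM constants come from "file_system.h",
 * that this kernel lives in the same translation unit as the routines above,
 * and that fs_open's unused `op` argument can be any value. */
__global__ void fs_demo_kernel(FileSystem* fs, uchar* out) {
fs_init(fs);
u32 fp = fs_open(fs, "demo.txt", 0);
uchar payload[4] = {'d', 'a', 't', 'a'};
fs_write(fs, payload, 4, fp);
fs_read(fs, out, 4, fp);
fs_gsys(fs, LS_D); // list files, most recently touched first
fs_gsys(fs, RM, "demo.txt"); // remove the file again
}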
| 4fe4139fa322c69577d92781042744db3de36116.cu | #include "file_system.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__device__ __managed__ u32 gtime = 0;
/* Utilities */
/* Return number of blocks needed to store nb bytes of data.
* 0 byte needs 1 block to store. */
__device__ u32 bytes2blocks(u32 nb) {
return nb == 0? 1 : (nb - 1) / DATA_BLOCK_SIZE + 1;
}
/* Adapted from: http://tekpool.wordpress.com/category/bit-count/
* Return location of first 1-bit in x, LSB to MSB, return 32 if not found. */
__device__ int first_1bit(int x) {
u32 u = (x & (-x)) - 1;
u32 uCount = u
- ((u >> 1) & 033333333333)
- ((u >> 2) & 011111111111);
return
((uCount + (uCount >> 3))
& 030707070707) % 63;
}
/* Return location of first n consecutive 1-bit's in x, LSB to MSB.
* Return -1 if not found. */
__device__ int first_n_1bit(u32 x, int n) {
int ones = 0;
int count = 0;
for (; x != 0; x >>= 1, count++) {
ones = (x & 1)? ones+1 : 0;
if (ones >= n)
return count - n + 1;
}
return -1;
}
/* https://stackoverflow.com/questions/50196897/creating-a-bit-
* mask-for-relevant-bits-over-multiple-bytes-programmatically */
__device__ u32 bitmask(int start, int length) {
u32 mask = 0xffffffff;
mask >>= 32 - length;
mask <<= start;
return mask;
}
__device__ char* my_strcpy(char* dest, const char* src) {
char *save = dest;
while (*dest++ = *src++);
return save;
}
__device__ int my_strcmp(const char* s1, const char* s2) {
uchar c1, c2;
do {
c1 = *s1++;
c2 = *s2++;
if (c1 == '\0')
return c1 - c2;
} while (c1 == c2);
return c1 - c2;
}
__device__ void swap(int* x, int* y) {
int temp = *x;
*x = *y;
*y = temp;
}
__device__ void print_centered(const char *str, int width, const char pad) {
char* padded = new char[width];
memset(padded, pad, width);
const char* s;
for (s = str; *s; ++s);
int strLen = s - str;
int padLen = (width - strLen) / 2;
memcpy(&padded[padLen], str, strLen);
printf("%s\n", padded);
delete[] padded;
}
/* File System */
/* Linearly scan the files array and return the first uninitialized fp.
* Return `EMPTY` if all `files` entries are already in use. */
__device__ u32 fs_find_empty_fp(FileSystem* fs) {
for (auto fp = 0; fp < FCB_ENTRIES; ++fp) {
if (fs->files[fp].starting_block == EMPTY)
return fp;
} return EMPTY;
}
__device__ u32 fs_find_name(FileSystem* fs, const char fname[]) {
u32 fp, fcount;
for (fp = fcount = 0; fp < FCB_ENTRIES && fcount < fs->nfiles; ++fp) {
if (fs->files[fp].starting_block != EMPTY) {
++fcount;
if (my_strcmp(fs->files[fp].fname, fname) == 0)
return fp;
}
}
return EMPTY;
}
__device__ void fs_init(FileSystem *fs) {
/* init all blocks status as empty (1) */
memset(fs->super, EMPTY, sizeof(fs->super));
/* init disk */
memset(fs->data, 0, sizeof(fs->data));
/* init FCB */
fs->nfiles = 0;
for (int i = 0; i < FCB_ENTRIES; ++i)
fs->files[i].starting_block = EMPTY;
}
__device__ u32 fs_open(FileSystem* fs, const char fname[], int op) {
u32 fp;
if ( (fp = fs_find_name(fs, fname)) != EMPTY ) {
printf("[fs_open] : File \"%s\" opened, fp: %d\n", fname, fp);
fs->files[fp].btime = ++gtime;
return fp;
}
// file doesn't exist. create new one.
printf("[fs_open] : Creating file \"%s\"\n", fname);
// error if number of files reached max
if (fs->nfiles >= FCB_ENTRIES) {
printf("[fs_open] : Failed. Number of files (%d) reached maximum!\n", FCB_ENTRIES);
return EMPTY;
}
// find empty fp
fp = fs_find_empty_fp(fs);
// find empty block for new file
int block_offset;
for (auto i = 0; i < N_SUPERBLOCKS; ++i) {
if ( (block_offset = first_1bit(fs->super[i])) < 32 ) {
my_strcpy(fs->files[fp].fname, fname);
fs->files[fp].starting_block = i * 32 + block_offset;
fs->files[fp].fsize = 0;
fs->files[fp].btime = ++gtime;
fs->super[i] ^= (1 << block_offset);
++fs->nfiles;
return fp;
}
}
// cannot find empty block
printf("[fs_open] : Failed. Cannot find any empty block!\n");
return EMPTY;
}
__device__ void fs_read(FileSystem *fs, uchar* output, u32 size, u32 fp) {
if (fs->files[fp].starting_block == EMPTY)
printf("[fs_read] : fp %d does not exist.\n", fp);
else {
size = (size <= fs->files[fp].fsize)? size : fs->files[fp].fsize;
auto starting_byte = fs->files[fp].starting_block * DATA_BLOCK_SIZE;
memcpy(output, &fs->data[starting_byte], size);
printf("[fs_read] : %d bytes of file \"%s\" read to output buffer.\n", size, fs->files[fp].fname);
fs->files[fp].btime = ++gtime;
}
}
// Assume fp exists.
__device__ void fs_write(FileSystem *fs, uchar* input, u32 size, u32 fp) {
if (size > MAX_FILE_SIZE) {
printf("[fs_write] : File size limit exceeded (%d > %d)",
size, MAX_FILE_SIZE);
return;
}
// old starting block
auto starting_block = fs->files[fp].starting_block;
// old number of blocks occupied
auto old_blocks = bytes2blocks(fs->files[fp].fsize);
// new number of blocks required
auto new_blocks = bytes2blocks(size);
// free old blocks temporarily.
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, old_blocks);
// need to find larger unoccupied blocks to write
if (new_blocks > old_blocks) {
int offset;
for (int i = 0; i < N_SUPERBLOCKS; ++i) {
offset = first_n_1bit(fs->super[i], new_blocks);
if (offset != -1) {
starting_block = i * 32 + offset;
break;
}
}
// failed to find larger unoccupied blocks, resume super status and return.
if (offset == -1) {
printf("[fs_write] : Failed to write to fp %d. Not enough data blocks!\n", fp);
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, old_blocks);
return;
}
}
// found suitable starting block to write, rewrite data
printf("[fs_write] : %d bytes written to file \"%s\"!\n", size, fs->files[fp].fname);
memcpy(&fs->data[starting_block * DATA_BLOCK_SIZE], input, size);
// update file info
fs->files[fp].starting_block = starting_block;
fs->files[fp].fsize = size;
fs->files[fp].btime = ++gtime;
// update fs: flip occupancy bits of new blocks and increment file counter.
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, new_blocks);
}
__device__ void fp_bubble_sort(FileSystem* fs, int* fp, int n,
int(*cmp)(const File*, const File*)) {
auto* files = fs->files;
bool swapped;
for (int i = 0; i < n - 1; ++i) {
swapped = false;
for (int j = 0; j < n - i - 1; ++j) {
auto* l = &files[fp[j]];
auto* r = &files[fp[j+1]];
if (cmp(l, r) > 0 || (cmp(l, r) == 0 && l->btime < r->btime)) {
swap(&fp[j], &fp[j+1]);
swapped = true;
}
}
if (!swapped) return;
}
}
__device__ void fs_gsys(FileSystem* fs, int op) {
/* Operations encoded by following op's
* LS_D: list files by Time (latest first)
* LS_S: list files by Size (largest first)
* LS_N: list files by Name (lexical order)
* LS_F: list files by fp (smallest first) */
auto fcount = 0;
int nfiles = fs->nfiles;
printf("\n%d file%c\t\tGtime: %d\n",
nfiles, nfiles > 1? 's' : '\0', gtime);
// obtain array of all current fp
int* fp = new int[nfiles];
for (int i = 0; i < FCB_ENTRIES && fcount < nfiles; ++i) {
if (fs->files[i].starting_block != EMPTY)
fp[fcount++] = i;
}
// bubble sort the fp array according to specified op
switch (op) {
case LS_D:
print_centered("Sort by Time", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name", "fp", "Size", "Time*", "Blocks");
fp_bubble_sort(fs, fp, nfiles, [](auto* f1, auto* f2) {
return int(f2->btime - f1->btime);});
break;
case LS_S:
print_centered("Sort by Size", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name", "fp", "Size*", "Time", "Blocks");
fp_bubble_sort(fs, fp, nfiles, [](auto* f1, auto* f2) {
return int(f2->fsize - f1->fsize);});
break;
case LS_N:
print_centered("Sort by Name", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name*", "fp", "Size", "Time", "Blocks");
fp_bubble_sort(fs, fp, nfiles, [](auto* f1, auto* f2) {
return my_strcmp(f1->fname, f2->fname);});
break;
case LS_F:
print_centered("Sort by fp", 60, '-');
printf("%-20s %-10s %-10s %-10s %-10s\n",
"Name", "fp*", "Size", "Time", "Blocks");
break;
default:
printf("[fs_gsys] : Invalid operation! (%d)\n", op);
delete[] fp; return;
}
File* file;
for (int i = 0; i < nfiles; ++i) {
file = &fs->files[fp[i]];
printf("%-20s %-10d %-10d %-10d %d-%d\n",
file->fname, fp[i],
file->fsize,
file->btime,
file->starting_block,
file->starting_block
+ bytes2blocks(file->fsize) - 1);
}
printf("\n");
delete[] fp;
}
__device__ void fs_gsys(FileSystem* fs, int op, const char fname[]) {
/* Operations encoded by following op's
* RM: remove a file */
switch (op) {
case RM:
u32 fp;
if ( (fp = fs_find_name(fs, fname)) != EMPTY ) {
printf("[fs_gsys] : File \"%s\" removed!\n", fname);
auto starting_block = fs->files[fp].starting_block;
auto blocks = bytes2blocks(fs->files[fp].fsize);
// deactivate fp
fs->files[fp].starting_block = EMPTY;
// update fs: free blocks in super and decrement file counter
fs->super[starting_block / 32] ^= bitmask(starting_block % 32, blocks);
fs->nfiles--;
} else // file doesn't exist.
printf("[fs_gsys] : file to remove \"%s\" doesn't exist!\n", fname);
break;
default:
printf("[fs_gsys] : Invalid operation! (%d)\n", op);
return;
}
}
|
0a35a60521c9b5c3c9b8cda083215621276b8ea1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at::native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
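// One thread per input element: the flat index is decomposed into (n, c, h, w),
// `output` is advanced to the start of the corresponding (n, c) output plane, and
// the saved pooling index `maxind` selects the destination element in that plane.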
int64_t outputImageSize = outputHeight * outputWidth;
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
CUDA_KERNEL_ASSERT(maxind >= 0 && maxind < outputImageSize);
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor64<T, 4> input,
PackedTensorAccessor64<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
int64_t outputImageSize = oT * oH * oW;
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
CUDA_KERNEL_ASSERT(index >= 0 && index < outputImageSize);
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
const T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor64<int64_t, 4> indices,
PackedTensorAccessor64<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling2d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices_.scalar_type());
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
for (int64_t i = 1; i < self_.ndimension(); ++i) {
TORCH_CHECK(self_.size(i) > 0, "max_unpooling2d_forward_out_cuda(): ",
"Expected input to have non-zero size for non-batch dimensions, but got ",
self_.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, but got tensor with dimension: ", self_.ndimension());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size, but got ", output_size.size(), " elements.");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
if (count != 0) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_forward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.numel(),
self.const_data_ptr<scalar_t>(),
indices.const_data_ptr<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.mutable_data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}));
}
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling2d_forward_out_cuda(self, indices, output_size, output);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
const char *fn_name) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices.scalar_type());
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with dim ", input.ndimension());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size, but got ", output_size.size(), " elements.");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride, but got: ", stride.size(), " elements.");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding, but got: ", padding.size(), " elements.");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Expected shape of indices to be: ", input.sizes(), " but got: ", indices.sizes());
for (int64_t i = 1; i < input.ndimension(); ++i) {
TORCH_CHECK(input.size(i) > 0, fn_name,
": Expected input to have non-zero size for non-batch dimensions, but got ",
input.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling3d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding, "max_unpooling3d_forward_out_cuda()");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (self.numel() == 0) {
return output;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_forward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
output.mutable_data_ptr<scalar_t>(),
oT,
oH,
oW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling3d_forward_out_cuda(
self, indices, output_size, stride, padding, output);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& grad_input) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got type: ", indices_.scalar_type());
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements, got size: ", output_size.size());
int64_t nInputCols, nInputRows, nInputPlane;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int64_t count = self.numel();
if (count == 0) {
return grad_input;
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_backward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.const_data_ptr<scalar_t>(),
indices.const_data_ptr<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.mutable_data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}));
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling2d_backward_out_cuda(
grad_output, self, indices, output_size, grad_input);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& grad_input) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding, "max_unpooling3d_backward_out_cuda()");
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (grad_input.numel() == 0) {
return grad_input;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_backward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output.const_data_ptr<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor64<int64_t, 4>(),
grad_input_reshaped.packed_accessor64<scalar_t, 4>(),
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling3d_backward_out_cuda(
grad_output, self, indices, output_size, stride, padding, grad_input);
return grad_input;
}
} // namespace at::native
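/* A rough, hypothetical sketch (not part of the original file) of how the kernels
 * above are normally reached through the public ATen API, assuming a CUDA-enabled
 * ATen build; kept as a comment so it does not affect the build:
 *
 *   at::Tensor input = at::randn({1, 3, 4, 4}, at::kCUDA);
 *   auto pooled = at::max_pool2d_with_indices(input, {2, 2});
 *   at::Tensor restored = at::max_unpool2d(std::get<0>(pooled),
 *                                          std::get<1>(pooled),
 *                                          {4, 4});
 */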
| 0a35a60521c9b5c3c9b8cda083215621276b8ea1.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at::native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
int64_t outputImageSize = outputHeight * outputWidth;
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
CUDA_KERNEL_ASSERT(maxind >= 0 && maxind < outputImageSize);
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor64<T, 4> input,
PackedTensorAccessor64<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
int64_t outputImageSize = oT * oH * oW;
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
CUDA_KERNEL_ASSERT(index >= 0 && index < outputImageSize);
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
const T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor64<int64_t, 4> indices,
PackedTensorAccessor64<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling2d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices_.scalar_type());
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
for (int64_t i = 1; i < self_.ndimension(); ++i) {
TORCH_CHECK(self_.size(i) > 0, "max_unpooling2d_forward_out_cuda(): ",
"Expected input to have non-zero size for non-batch dimensions, but got ",
self_.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, but got tensor with dimension: ", self_.ndimension());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size, but got ", output_size.size(), " elements.");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
if (count != 0) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
max_unpooling2d_forward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.numel(),
self.const_data_ptr<scalar_t>(),
indices.const_data_ptr<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.mutable_data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}));
}
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling2d_forward_out_cuda(self, indices, output_size, output);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
const char *fn_name) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices.scalar_type());
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with dim ", input.ndimension());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size, but got ", output_size.size(), " elements.");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride, but got: ", stride.size(), " elements.");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding, but got: ", padding.size(), " elements.");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Expected shape of indices to be: ", input.sizes(), " but got: ", indices.sizes());
for (int64_t i = 1; i < input.ndimension(); ++i) {
TORCH_CHECK(input.size(i) > 0, fn_name,
": Expected input to have non-zero size for non-batch dimensions, but got ",
input.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling3d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding, "max_unpooling3d_forward_out_cuda()");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (self.numel() == 0) {
return output;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_forward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
output.mutable_data_ptr<scalar_t>(),
oT,
oH,
oW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling3d_forward_out_cuda(
self, indices, output_size, stride, padding, output);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& grad_input) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got type: ", indices_.scalar_type());
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements, got size: ", output_size.size());
int64_t nInputCols, nInputRows, nInputPlane;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int64_t count = self.numel();
if (count == 0) {
return grad_input;
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
max_unpooling2d_backward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
grad_output.const_data_ptr<scalar_t>(),
indices.const_data_ptr<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.mutable_data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}));
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling2d_backward_out_cuda(
grad_output, self, indices, output_size, grad_input);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& grad_input) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding, "max_unpooling3d_backward_out_cuda()");
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (grad_input.numel() == 0) {
return grad_input;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_backward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output.const_data_ptr<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor64<int64_t, 4>(),
grad_input_reshaped.packed_accessor64<scalar_t, 4>(),
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling3d_backward_out_cuda(
grad_output, self, indices, output_size, stride, padding, grad_input);
return grad_input;
}
} // namespace at::native
|
b3daa0b9d1d02f21e2fea42335e3aa08f31b0d96.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2015, Tayler Hetherington
// The University of British Columbia
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// Neither the name of The University of British Columbia nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
* cuda_gpu_nom_memcached.cu
*/
// Set Associative version of the hash table
// CUDA utilities and system includes
#ifndef __CUDA_VERSION__
//#define __
#endif
#include <hip/hip_runtime.h>
#include <host_defines.h>
#include <device_launch_parameters.h>
#include <stdio.h>
//#define DEBUG // Uncomment to enable some debugging
// If this is set, MemcachedGPU sends back the 8Byte Memcached header with the response
//#define LATENCY_MEASURE
// If this is set, the response packet is a constant size (RESPONSE_SIZE) independent from the Memcached packet
// The packet header/checksum can be computed earlier in parallel with the Memcached lookup.
//#define CONSTANT_RESPONSE_SIZE
#define RESPONSE_SIZE 72 //80 // 72 for peak throughput, 80 for latency test
#define USE_KEY_HASH
#define KEY_HASH_MASK 0x0000000FF
#define SET_ASSOC_SIZE 16
#define RESPONSE_HDR_STRIDE 256
#define NETWORK_PKT_SIZE 42
/*************************************/
#define REQUEST_GROUP_SIZE 128 // Don't change // Number of requests per group (subset of batch)
#define MAX_THREADS_PER_BLOCK 256 // Number of threads per request group
#define NUM_REQUESTS_PER_GROUP 256 // Do not change
/*************************************/
// This should be changed to match the number of requests per batch in GNoM_KM and GNoM_User
// (Should match NUM_REQUESTS_PER_BATCH in GNoM_km/gpu_km_shared.h)
#define NUM_REQUESTS_PER_BATCH 512 //256
#define NUM_THREADS_PER_GROUP NUM_REQUESTS_PER_GROUP*2 // NUM_REQUESTS_PER_BATCH*2
#define NUM_GROUPS NUM_REQUESTS_PER_BATCH / NUM_REQUESTS_PER_GROUP
/*************************************/
// Note: Currently the Tesla can have 2KB RX buffers, but the Maxwell
// requires smaller buffers due to the smaller memory
#define RX_BUFFER_SZ 2048
//#define RX_BUFFER_SZ 1024
#define UDP_PORT 9960
#define ETH_ALEN 6
#define IPPROTO_UDP 17
// Smaller max key size for testing.
#define MAX_KEY_SIZE 140 //250
#define UNLOCKED 0 // No lock set
#define SHARED_LOCK 1 // GET request(s) have the item locked
#define PRIVATE_LOCK 2 // SET request has the item locked. Only a single PRIVATE_LOCK can be obtained at a time.
#define G_HTONS(val) (u_int16_t) ((((u_int16_t)val >> 8) & 0x00FF ) | (((u_int16_t)val << 8) & 0xFF00) )
#define G_NTOHS(val) (G_HTONS(val))
#define G_HTONL(val) (u_int32_t) ( (((u_int32_t)val & 0xFF000000) >> 24 ) | \
(((u_int32_t)val & 0x00FF0000) >> 8 ) | \
(((u_int32_t)val & 0x0000FF00) << 8 ) | \
(((u_int32_t)val & 0x000000FF) << 24))
#define G_NTOHL(val) (G_HTONL(val))
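// Example: UDP_PORT 9960 is 0x26E8, so G_HTONS(UDP_PORT) == 0xE826, i.e. the two
// bytes are swapped into network byte order for comparison with packet headers.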
#define hashsize(n) ((unsigned int)1<<(n))
#define hashmask(n) (hashsize(n)-1)
typedef unsigned int rel_time_t;
// Placeholder for Memcached item pointers
typedef void item;
typedef struct _ether_header{
u_int8_t ether_dhost[ETH_ALEN]; /* destination eth addr */
u_int8_t ether_shost[ETH_ALEN]; /* source ether addr */
u_int16_t ether_type; /* packet type ID field */
}ether_header;
typedef struct _ip_header {
u_int8_t version; /* version */ // Version+ihl = 8 bits, so replace ihl with 8bit version
//u_int32_t ihl:4; /* header length */
u_int8_t tos; /* type of service */
u_int16_t tot_len; /* total length */
u_int16_t id; /* identification */
u_int16_t frag_off; /* fragment offset field */
u_int8_t ttl; /* time to live */
u_int8_t protocol; /* protocol */
u_int16_t check; /* checksum */
u_int16_t saddr1; /* source and dest address */
u_int16_t saddr2;
u_int16_t daddr1;
u_int16_t daddr2;
}ip_header;
typedef struct _udp_header {
u_int16_t source; /* source port */
u_int16_t dest; /* destination port */
u_int16_t len; /* udp length */
u_int16_t check; /* udp checksum */
}udp_header;
typedef struct _memc_hdr_{
u_int8_t hdr[14]; // Only 8 Bytes, but padding an extra 4 bytes for memcpy purposes
}memc_hdr;
typedef struct _pkt_memc_hdr_{
ether_header eh;
ip_header iph;
udp_header udp;
memc_hdr mch;
}pkt_memc_hdr;
typedef struct _pkt_res_memc_hdr_{
ether_header eh;
ip_header iph;
udp_header udp;
char valstr_key[RESPONSE_HDR_STRIDE - NETWORK_PKT_SIZE];
}pkt_res_memc_hdr;
typedef struct _mod_pkt_info_{
item *it; // CPU VA pointer to found item
unsigned pkt_length; // Total length of response packet => Packet UDP header + "VALUE " + key + suffix + data (with "\r\n")
int hv; // Hash value
int is_get_req;
pkt_memc_hdr nmch; // Packet header + memc 8 Byte header
}mod_pkt_info;
typedef unsigned char uint8_t;
typedef struct _key_ {
unsigned key_len;
char key[MAX_KEY_SIZE];
} _key_;
// Forward declarations
__device__ int d_memcmp(const void *key1, const void *key2, int num){
const unsigned *p1 = (const unsigned* )key1;
const unsigned *p2 = (const unsigned* )key2;
int main_loop = num / sizeof(int);
int extra_loop = num % sizeof(int);
for(unsigned i=0; i<main_loop; i++){
unsigned diff = *(p1 + i) - *(p2 + i);
if( diff != 0){
return 0;
}
}
const char * p12 = ( const char * )key1;
const char * p22 = (const char*)key2;
for(unsigned i=main_loop*sizeof(int); i<extra_loop+main_loop*sizeof(int); i++){
unsigned char diff = *( p12 + i ) - *( p22 + i );
if( diff != 0){
return 0;
}
}
return 1;
}
// NOTE: This requires key lengths to be in increments 4 bytes
__device__ int fast_memcmp(const void *key1, const void *key2, int num){
const unsigned *p1 = (const unsigned* )key1;
const unsigned *p2 = (const unsigned* )key2;
int main_loop = num / sizeof(int);
for(unsigned i=0; i<main_loop; i++){
if(*(p1+i) != *(p2+i)){
return 0;
}
}
return 1;
}
// Compare char by char
__device__ int slow_memcmp(const char *key1, const char *key2, int num){
unsigned i=0;
int flag = 1;
for(i=0; i<num; i++){
if(key1[i] != key2[i]){
flag = 0;
break;
}
}
return flag;
}
/***********************************************/
/***********************************************/
// Bob Jenkin's hash from baseline Memcached
/***********************************************/
/***********************************************/
#define rot(x,k) (((x)<<(k)) ^ ((x)>>(32-(k))))
#define memcached_mix(a,b,c) \
{ \
a -= c; a ^= rot(c, 4); c += b; \
b -= a; b ^= rot(a, 6); a += c; \
c -= b; c ^= rot(b, 8); b += a; \
a -= c; a ^= rot(c,16); c += b; \
b -= a; b ^= rot(a,19); a += c; \
c -= b; c ^= rot(b, 4); b += a; \
}
#define final(a,b,c) \
{ \
c ^= b; c -= rot(b,14); \
a ^= c; a -= rot(c,11); \
b ^= a; b -= rot(a,25); \
c ^= b; c -= rot(b,16); \
a ^= c; a -= rot(c,4); \
b ^= a; b -= rot(a,14); \
c ^= b; c -= rot(b,24); \
}
__device__ unsigned int hash( char const * key, /* the key to hash */
size_t length, /* length of the key */
const unsigned int initval /* initval */){
unsigned int a,b,c; /* internal state */
union { const char *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
/* Set up the internal state */
a = b = c = 0xdeadbeef + ((unsigned int)length) + initval;
u.ptr = key;
if (((u.i & 0x3) == 0)) {
unsigned int const * k = ( unsigned int const *)key;
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
b += k[1];
c += k[2];
memcached_mix(a,b,c);
length -= 12;
k += 3;
}
switch(length)
{
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
case 8 : b+=k[1]; a+=k[0]; break;
case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
case 6 : b+=k[1]&0xffff; a+=k[0]; break;
case 5 : b+=k[1]&0xff; a+=k[0]; break;
case 4 : a+=k[0]; break;
case 3 : a+=k[0]&0xffffff; break;
case 2 : a+=k[0]&0xffff; break;
case 1 : a+=k[0]&0xff; break;
case 0 : return c; /* zero length strings require no mixing */
}
} else if (((u.i & 0x1) == 0)) {
unsigned short const * k = (unsigned short const *)key; /* read 16-bit chunks */
unsigned char const * k8;
/*--------------- all but last block: aligned reads and different mixing */
while (length > 12)
{
a += k[0] + (((unsigned int)k[1])<<16);
b += k[2] + (((unsigned int)k[3])<<16);
c += k[4] + (((unsigned int)k[5])<<16);
memcached_mix(a,b,c);
length -= 12;
k += 6;
}
/*----------------------------- handle the last (probably partial) block */
k8 = ( unsigned char const *)k;
switch(length)
{
case 12: c+=k[4]+(((unsigned int)k[5])<<16);
b+=k[2]+(((unsigned int)k[3])<<16);
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 11: c+=((unsigned int)k8[10])<<16; /* @fallthrough */
/* no break */
case 10: c+=k[4]; /* @fallthrough@ */
b+=k[2]+(((unsigned int)k[3])<<16);
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 9 : c+=k8[8]; /* @fallthrough */
case 8 : b+=k[2]+(((unsigned int)k[3])<<16);
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 7 : b+=((unsigned int)k8[6])<<16; /* @fallthrough */
case 6 : b+=k[2];
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 5 : b+=k8[4]; /* @fallthrough */
case 4 : a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 3 : a+=((unsigned int)k8[2])<<16; /* @fallthrough */
case 2 : a+=k[0];
break;
case 1 : a+=k8[0];
break;
case 0 : return c; /* zero length strings require no mixing */
}
} else { /* need to read the key one byte at a time */
unsigned char const * k = ( unsigned char const *)key;
/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
a += ((unsigned int)k[1])<<8;
a += ((unsigned int)k[2])<<16;
a += ((unsigned int)k[3])<<24;
b += k[4];
b += ((unsigned int)k[5])<<8;
b += ((unsigned int)k[6])<<16;
b += ((unsigned int)k[7])<<24;
c += k[8];
c += ((unsigned int)k[9])<<8;
c += ((unsigned int)k[10])<<16;
c += ((unsigned int)k[11])<<24;
memcached_mix(a,b,c);
length -= 12;
k += 12;
}
/*-------------------------------- last block: affect all 32 bits of (c) */
switch(length) /* all the case statements fall through */
{
case 12: c+=((unsigned int)k[11])<<24;
case 11: c+=((unsigned int)k[10])<<16;
case 10: c+=((unsigned int)k[9])<<8;
case 9 : c+=k[8];
case 8 : b+=((unsigned int)k[7])<<24;
case 7 : b+=((unsigned int)k[6])<<16;
case 6 : b+=((unsigned int)k[5])<<8;
case 5 : b+=k[4];
case 4 : a+=((unsigned int)k[3])<<24;
case 3 : a+=((unsigned int)k[2])<<16;
case 2 : a+=((unsigned int)k[1])<<8;
case 1 : a+=k[0];
break;
case 0 : return c; /* zero length strings require no mixing */
}
}
final(a,b,c);
return c; /* report the result */
}
/***********************************************/
/***********************************************/
// This checksum skips the ip_header length field, but adds up everything else.
// Later we can add in the length. Used to overlap independent computation to
// reduce processing latency
__device__ int partial_cksum(unsigned char *buf, unsigned nbytes, int sum) {
uint i;
/* Checksum all the pairs of bytes first... */
for (i = 0; i < (nbytes & ~1U); i += 2) {
if(i != 2){ // Bytes 2&3 are the IP header length field, skip it
sum += (u_int16_t) G_NTOHS(*((u_int16_t *)(buf + i)));
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
}
/* If there's a single byte left over, checksum it, too. Network
byte order is big-endian, so the remaining byte is the high byte. */
if(i < nbytes) {
sum += buf [i] << 8;
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
return sum;
}
// Only add up the ip header length once we know the response packet size
__device__ int cksum_hdr_len_only(unsigned char *buf, int sum){
sum += (u_int16_t) G_NTOHS(*((u_int16_t *)(buf + 2)));
if(sum > 0xFFFF)
sum -= 0xFFFF;
return sum;
}
// Full checksum
/*
* Checksum routine for Internet Protocol family headers (C Version)
*
* Borrowed from DHCPd
*/
__device__ int in_cksum(unsigned char *buf, unsigned nbytes, int sum) {
uint i;
/* Checksum all the pairs of bytes first... */
for (i = 0; i < (nbytes & ~1U); i += 2) {
sum += (u_int16_t) G_NTOHS(*((u_int16_t *)(buf + i)));
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
/* If there's a single byte left over, checksum it, too. Network
byte order is big-endian, so the remaining byte is the high byte. */
if(i < nbytes) {
sum += buf [i] << 8;
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
return sum;
}
/* ******************************************* */
__device__ int wrapsum (u_int32_t sum) {
sum = ~sum & 0xFFFF;
return G_NTOHS(sum);
}
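// How these are used in the GET path below (when CONSTANT_RESPONSE_SIZE is not defined):
// the helper thread first computes partial_cksum() over the IP header while tot_len is still
// unknown, then finalizes the checksum with wrapsum(cksum_hdr_len_only(...)) in
// mod_populate_response() once the response length is known.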
/* ******************************************* */
typedef struct _gpu_primary_hashtable_{
void *item_ptr;
rel_time_t last_accessed_time;
unsigned valid;
#ifdef USE_KEY_HASH
unsigned key_hash; // 8-bit key hash - using 4 bytes to keep everything aligned
#endif
unsigned key_length;
unsigned pkt_length;
char key[MAX_KEY_SIZE];
}gpu_primary_hashtable;
typedef struct _gpu_set_req_{
void *item_ptr;
unsigned init_hv;
unsigned key_length;
unsigned pkt_length;
char key[MAX_KEY_SIZE];
}gpu_set_req;
typedef struct _gpu_set_res_{
int host_signal;
int is_evicted;
int is_last_get;
unsigned evicted_hv;
unsigned evicted_lru_timestamp;
void *evitcted_ptr;
}gpu_set_res;
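// Outcome codes written to is_evicted by memcached_SET_kernel below:
// 0 = stored into a free slot, 1 = evicted the set's LRU entry,
// 2 = overwrote an existing entry whose key matched (SET hit).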
// Forward declarations
__device__ void mod_parse_pkt( unsigned long long first_RX_buffer_ptr,
int local_tid,
int logical_tid,
int thread_type,
mod_pkt_info *mpi,
_key_ *g_key );
__device__ int mod_process_get_request( mod_pkt_info *mpi,
int hashpower,
rel_time_t time,
volatile gpu_primary_hashtable *g_primary_hashtable,
_key_ *g_key,
int *gpu_hash_lock );
__device__ void mod_create_response_header(mod_pkt_info *mpi, int helper_tid);
__device__ void mod_populate_response(size_t *res_mem, mod_pkt_info *mpi, int tid, int helper_tid, int group_id, int *item_is_found, unsigned thread_type, int cta_id, _key_ *g_key);
extern "C" __global__ void memcached_SET_kernel(int *req_mem,
int *res_mem,
int hashpower, // Memcached hashpower
unsigned int *gpu_hashtable, // GPU resident Memcached hashtable
int *gpu_hash_lock, // GPU resident locks for hashtable
rel_time_t timestamp){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int ret=0;
__shared__ unsigned hv;
__shared__ unsigned set_index;
__shared__ unsigned set_hv_index;
__shared__ unsigned insert_hv_index;
__shared__ unsigned key_hash_t;
__shared__ unsigned evict_lru_timestamp;
volatile gpu_primary_hashtable *g_primary_hashtable = (volatile gpu_primary_hashtable *)gpu_hashtable;
gpu_set_req *m_gph = (gpu_set_req *)req_mem;
gpu_set_res *m_gsr = (gpu_set_res *)res_mem;
volatile gpu_primary_hashtable *temp_gph;
volatile gpu_primary_hashtable *gph;
int oldest_item_hv = -1;
size_t oldest_item_time = 0xFFFFFFFFFFFFFFFF;
int free_found = 0;
int is_locked = 0;
int old_lock_val = 0;
unsigned num_sets = hashsize(hashpower) / SET_ASSOC_SIZE;
#ifdef DEBUG
unsigned hv = 0;
if(tid==0){
hv = hash(m_gph->key, m_gph->key_length, 0);
if(hv != m_gph->init_hv){
printf("HASH VALUES NOT EQUAL!!\n");
}
}
#endif
m_gsr->is_evicted = 0; // Initialize to 0 (free slot). May be changed below to 1 (LRU eviction) or 2 (SET hit on an existing key)
// Set Assoc Hashing - Search for a free spot within the set from init_hv
if(tid == 0){
hv = m_gph->init_hv; // Grab the hash value from the CPU calculation
set_index = hv % num_sets; // Calculate the set index for this hash value
set_hv_index = set_index*SET_ASSOC_SIZE; // Move to the correct location in the hash table for this set
key_hash_t = hv & KEY_HASH_MASK; // Calculate the 8-bit key hash
// Lock the current set
while(!is_locked){
old_lock_val = atomicCAS(&gpu_hash_lock[set_index], 0, -1);
if(old_lock_val == UNLOCKED){
is_locked = 1;
}
}
for(unsigned i=0; i<SET_ASSOC_SIZE; ++i){
temp_gph = (volatile gpu_primary_hashtable *)&g_primary_hashtable[set_hv_index + i]; // Index into the hashtable at this set
if(temp_gph->valid > 0){ // This hash location is already occupied, check the next location
// First check key hash. If equal, then do key comparison. Otherwise, no way they're equal.
if(temp_gph->key_hash == key_hash_t){
// If key hash matches, check complete key
ret = fast_memcmp((const void *)m_gph->key, (const void *)temp_gph->key, m_gph->key_length);
if(ret == 1){
// If matches, select this entry to overwrite. Set matching key-value pair to evict.
// This is required to ensure correct ordering on the CPU post processing
// Treat this the same as an LRU evict
oldest_item_time = temp_gph->last_accessed_time;
oldest_item_hv = (set_hv_index+i);
free_found = 0;
m_gsr->is_evicted = 2; // Set to SET hit
break;
}
}
// If no hit, update LRU status for this set
if((temp_gph->last_accessed_time < oldest_item_time) || (oldest_item_hv == -1)){
oldest_item_time = temp_gph->last_accessed_time;
oldest_item_hv = (set_hv_index+i);
}
}else{
// No need to search the whole set if an invalid entry is found
free_found = 1;
insert_hv_index = (set_hv_index + i);
break;
}
}
if(!free_found){
// Didn't find any free spots... Need to evict an item with the oldest timestamp within the set
insert_hv_index = oldest_item_hv;
evict_lru_timestamp = oldest_item_time;
if(m_gsr->is_evicted == 0){
m_gsr->is_evicted = 1;
}
}
}
__syncthreads();
__threadfence();
gph = (volatile gpu_primary_hashtable *)&g_primary_hashtable[insert_hv_index]; // Index into the hashtable
unsigned int *temp_key_src = (unsigned int *)m_gph->key;
unsigned int *temp_key_dst = (unsigned int *)gph->key;
// Block memory copy with all threads in the warp (max key size of 128 with this code)
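// (32 threads x 4 bytes = 128 bytes; MAX_KEY_SIZE is 140, so only the first 128 bytes of a longer key are copied here.)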
if(tid < 32){
temp_key_dst[tid] = temp_key_src[tid]; // Copy the key over (Maybe overwriting previous key)
}
__syncthreads();
__threadfence();
if(tid == 0){
if(!free_found){
m_gsr->evicted_hv = oldest_item_hv;
m_gsr->evitcted_ptr = gph->item_ptr;
m_gsr->evicted_lru_timestamp = evict_lru_timestamp;
}
// Set 8-bit key hash
gph->key_hash = hv & KEY_HASH_MASK;
gph->item_ptr = m_gph->item_ptr;
gph->key_length = m_gph->key_length;
gph->pkt_length = m_gph->pkt_length;
// Record whether the last access was a SET or GET request
if(gph->valid == 1){
m_gsr->is_last_get = 0;
}else if(gph->valid == 2){
m_gsr->is_last_get = 1;
}
gph->valid = 1;
gph->last_accessed_time = (unsigned)timestamp;
#ifdef DEBUG
// DEBUG: Verify stored KEY matches
int ret = 0;
ret = d_memcmp((const void *)m_gph->key, (const void *)gph->key, m_gph->key_length);
if(ret != 1){
printf("KEYS NOT EQUAL!!\n");
}
#endif
gpu_hash_lock[set_index] = UNLOCKED; // Unlock the set
}
__threadfence_system();
/************************ End Critical Section ************************/
}
extern "C" __global__ void memcached_GET_kernel(unsigned long long first_req_addr, // Address of first CUDA buffer containing a valid packet
int num_req, // # of requests
int *response_mem, // Memory allocated for responses
int hashpower, // Memcached hashpower
unsigned int *gpu_hashtable, // GPU resident Memcached hashtable
int *gpu_hash_lock, // GPU resident locks for hashtable
rel_time_t timestamp){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int local_tid = threadIdx.x;
int thread_type = ((local_tid % MAX_THREADS_PER_BLOCK) < (MAX_THREADS_PER_BLOCK / 2)) ? 0 : 1;// 0 means actual request threads, 1 means helper threads
// This represents the request # that each thread will be responsible for. Request threads
// will be from 0->NUM_REQUESTS_PER_GROUP
// Each block handles NUM_REQUESTS_PER_GROUP (256) requests in two sub-groups of 128: 256 threads per 128 requests (128 request threads + 128 helper threads).
int group_id;
if(local_tid < MAX_THREADS_PER_BLOCK){
group_id = 0;
}else{
group_id = 1;
}
int half_group_size = MAX_THREADS_PER_BLOCK/2; // ==> 256/2 = 128
int logical_tid = -1;
if(thread_type == 0){
logical_tid = (group_id * half_group_size) + (tid % half_group_size); // First half of each sub-group: request threads
}else{
logical_tid = (group_id * half_group_size) + ( (tid-half_group_size) % half_group_size);
}
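// Worked example (assuming the kernel is launched with NUM_THREADS_PER_GROUP (512) threads per block,
// as the grouping arithmetic implies):
// local threads 0-127 : thread_type 0 (request), group 0, logical_tid 0-127
// local threads 128-255: thread_type 1 (helper), group 0, logical_tid 0-127
// local threads 256-383: thread_type 0 (request), group 1, logical_tid 128-255
// local threads 384-511: thread_type 1 (helper), group 1, logical_tid 128-255
// so each of the 256 requests in a block gets one request thread and one helper thread.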
_key_ m_key; // Local key per thread
volatile gpu_primary_hashtable *g_primary_hashtable = (volatile gpu_primary_hashtable *)gpu_hashtable; // Global Memcached Hash Table
__shared__ mod_pkt_info mpi[NUM_REQUESTS_PER_GROUP];
__shared__ int item_is_found[NUM_REQUESTS_PER_GROUP];
m_key.key_len = 0;
// Address of first packet. All other packets are pkt#*RX_BUFFER_SZ away from first_req_addr
unsigned long long m_first_RX_buffer_addr = first_req_addr + (blockIdx.x * RX_BUFFER_SZ * NUM_REQUESTS_PER_GROUP);
mod_parse_pkt(m_first_RX_buffer_addr, local_tid, logical_tid, thread_type, mpi, &m_key);
__syncthreads();
__threadfence();
#ifdef DEBUG
if(mpi[logical_tid].is_get_req){
printf("GET:::tid: %d, local_tid: %d, thread_type: %d, group_id: %d, logical_tid: %d\n", tid, local_tid, thread_type, group_id, logical_tid);
}else{
printf("FAIL:::tid: %d, local_tid: %d, thread_type: %d, group_id: %d, logical_tid: %d\n", tid, local_tid, thread_type, group_id, logical_tid);
}
#endif
if(mpi[logical_tid].is_get_req){
if(thread_type == 0){
item_is_found[logical_tid] = mod_process_get_request(&mpi[logical_tid], hashpower, timestamp, g_primary_hashtable, &m_key, gpu_hash_lock);
}else{
mod_create_response_header(&mpi[logical_tid], logical_tid);
}
}
__syncthreads();
__threadfence();
mod_populate_response((size_t *)response_mem, mpi, local_tid, logical_tid, group_id, item_is_found, thread_type, blockIdx.x, &m_key);
__syncthreads();
__threadfence_system();
}
// TODO: The coalesced packet load is currently hardcoded to 256 requests per sub-group
// and 512 threads per group. This likely doesn't need to change, but it could be
// made configurable.
#define NUM_REQ_PER_LOOP 16
#define WARP_SIZE 32
#ifdef LATENCY_MEASURE
#define THREAD_PER_HDR_COPY 14 // 14 threads * 4 bytes = 56 bytes / hdr = 42 byte header + 8 byte memc hdr + "value "
#else
#define THREAD_PER_HDR_COPY 13 // 13 threads * 4 bytes = 52 bytes / hdr
#endif
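// Header-copy geometry: each warp (32 lanes) services NUM_REQ_PER_LOOP (16) requests; for each
// request, lane w copies the 4-byte word (w % THREAD_PER_HDR_COPY) of the packet header, covering
// 52 bytes (56 with LATENCY_MEASURE). Lanes 13-31 redundantly rewrite words already written,
// which is harmless because they copy identical data.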
__device__ void mod_parse_pkt( unsigned long long first_RX_buffer_ptr,
int local_tid,
int logical_tid,
int thread_type,
mod_pkt_info *mpi,
_key_ *g_key){
const char *GET = "get ";
int *req_ptr = NULL;
int *pkt_hdr_ptr = NULL;
char *pkt_hdr = NULL;
int ehs = sizeof(ether_header);
int ips = sizeof(ip_header);
int udps = sizeof(udp_header);
unsigned network_size = ehs + ips + udps;
ip_header *iph;
udp_header *udp;
char *payload;
char *key;
int count = 0;
u_int16_t check = 0;
int req_ind = (int)(local_tid / WARP_SIZE); // Which warp do you belong to?
req_ind *= NUM_REQ_PER_LOOP;
int w_tid = local_tid % WARP_SIZE;
int masked_ind = w_tid % THREAD_PER_HDR_COPY;
/**********************************************************/
// Load packet headers from global to shared memory *coalesced accesses*
// "LOAD PACKETS" stage from the SoCC paper.
for(unsigned i=0; i<NUM_REQ_PER_LOOP; ++i){
req_ptr = (int *)( first_RX_buffer_ptr + ((req_ind + i)*RX_BUFFER_SZ) );
pkt_hdr_ptr = (int *)(&mpi[req_ind + i].nmch);
pkt_hdr_ptr[masked_ind] = req_ptr[masked_ind];
}
__syncthreads();
// "PARSE UDP PACKET" stage from the SoCC paper
// The packet header contents are all in shared memory, now verify the packet contents (still in global mem)
mpi[logical_tid].is_get_req = 1; // Assume all are UDP Memcached GET requests
if(thread_type == 0){
pkt_hdr = (char *)&mpi[logical_tid].nmch;
iph = (ip_header *)(pkt_hdr + ehs);
udp = (udp_header *)(pkt_hdr + ehs + ips);
payload = (char *)(first_RX_buffer_ptr + (logical_tid*RX_BUFFER_SZ));
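// Skip the Ethernet/IP/UDP headers plus the 8-byte memcached UDP frame header to reach the ASCII command ("get <key>\r\n")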
payload += (network_size+8);
if(G_NTOHS(udp->dest) != UDP_PORT){
mpi[logical_tid].is_get_req = 0;
#ifdef DEBUG
printf("UDP_PORT WRONG (%hu)\n", G_NTOHS(udp->dest));
#endif
}
// Verify Checksum
// Lower 4-bits of version is the ip_header length (ihl)
if(iph->check != 0){
check = wrapsum(in_cksum((unsigned char *)iph, (iph->version & 0x0F)<<2, 0));
if(check != 0){
mpi[logical_tid].is_get_req = 0;
}
}
if(mpi[logical_tid].is_get_req){
for(unsigned i=0; i<3; ++i){
if(payload[i] != GET[i]){
mpi[logical_tid].is_get_req = 0;
}
}
}
key = payload+4; // Move past "get "
if(mpi[logical_tid].is_get_req){
// key is guaranteed to be a minimum of 16 bytes, load in 16 bytes as shorts.
for(unsigned i=0; i<8; i++, count += 2){
((short *)(g_key->key))[i] = ((short *)(key))[i];
}
// Then load in the rest, searching for the end condition
while( (key[count] != '\r') || (key[count+1] != '\n') ){
g_key->key[count] = key[count];
count++;
}
// Check if key is too large
if(count >= MAX_KEY_SIZE){
mpi[logical_tid].is_get_req = 0;
}
}
// Set the key length
g_key->key_len = count;
}
}
// Actual Memcached hash + key lookup.
// "Network Service Processing" stage in the SoCC paper
__device__ int mod_process_get_request(mod_pkt_info *mpi, int hashpower, rel_time_t time,
volatile gpu_primary_hashtable *g_primary_hashtable,
_key_ *g_key,
int *gpu_hash_lock){
unsigned hv;
int ret = 0;
size_t nkey = g_key->key_len;
char *key = g_key->key;
volatile char *key_t;
unsigned key_hash_t;
volatile gpu_primary_hashtable *m_gph;
int is_locked = 0;
volatile int old_lock_val = -1;
volatile int new_lock_val = 0;
volatile int new_old_lock_val = 0;
unsigned set_index;
unsigned set_hv_index;
unsigned key_hash = 0;
// Compute the hash
hv = hash(key, nkey, 0);
key_hash = hv & KEY_HASH_MASK; // Compute the hash mask for this key
// Compute the set index for the hash and the corresponding index into the hash table
unsigned num_sets = hashsize(hashpower) / SET_ASSOC_SIZE;
set_index = hv % num_sets; // Calculate the set index for this hash value
set_hv_index = set_index*SET_ASSOC_SIZE; // Move to the correct location in the hash table for this set
// Soft mutex for each GET request. Multiple shared_locks, only single private_lock.
// Grab the shared lock for the set
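// Lock encoding (shared with memcached_SET_kernel): 0 = unlocked, -1 = exclusive SET lock,
// positive N = N GET requests currently holding the shared lock.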
while(!is_locked){
old_lock_val = gpu_hash_lock[set_index];
if(old_lock_val != -1){ // TEST
new_lock_val = old_lock_val+1;
new_old_lock_val = atomicCAS(&gpu_hash_lock[set_index], old_lock_val, new_lock_val); // and TEST and SET
if(new_old_lock_val == old_lock_val){
is_locked = 1;
}
}
}
// Set initial response length if item isn't found
mpi->pkt_length = RESPONSE_SIZE;
/************************ Critical Section ************************/
for(unsigned i=0; i<SET_ASSOC_SIZE; ++i){
m_gph = (volatile gpu_primary_hashtable *)&g_primary_hashtable[set_hv_index + i];
if(m_gph->valid > 0){
key_t = (volatile char *)m_gph->key;
// New - First check key hash. If equal, then do key comparison. Otherwise, no way they're equal.
key_hash_t = m_gph->key_hash;
if(key_hash == key_hash_t){
ret = fast_memcmp((const void *)key, (const void *)key_t, nkey);
if(ret){
mpi->it = (item *)m_gph->item_ptr; // Update response pointer
#ifndef CONSTANT_RESPONSE_SIZE
mpi->pkt_length = m_gph->pkt_length; // Update value length for response packet size
#endif
m_gph->last_accessed_time = time; // Possible Race Condition if multiple GETs updating this concurrently, but don't care who wins
m_gph->valid = 2; // Update hash table entry to say that last access was a GET request
break;
}
}
}
}
// Unlock the set
atomicSub(&gpu_hash_lock[set_index], 1);
/************************ End Critical Section ************************/
return ret;
}
__device__ void mod_create_response_header(mod_pkt_info *mpi, int helper_tid){
// m_res points to correct response memory for this helper_thread
// mpi contains unmodified packet header, modify in shared memory
// Elements to swap
u_int8_t ether_swap;
u_int16_t ip_addr1;
u_int16_t ip_addr2;
u_int16_t udp_port;
const char *VALUE = "VALUE ";
char *header = (char *)(&mpi->nmch);
ether_header *eh = (ether_header *)header;
ip_header *iph = (ip_header *)&header[14];
udp_header *uh = (udp_header *)&header[34];
// Swap ether
for(unsigned i=0; i<ETH_ALEN; ++i){
ether_swap = eh->ether_shost[i];
eh->ether_shost[i] = eh->ether_dhost[i];
eh->ether_dhost[i] = ether_swap;
}
// Swap IP
ip_addr1 = iph->saddr1;
ip_addr2 = iph->saddr2;
iph->saddr1 = iph->daddr1;
iph->saddr2 = iph->daddr2;
iph->daddr1 = ip_addr1;
iph->daddr2 = ip_addr2;
iph->check = 0;
// Swap UDP port
udp_port = uh->source;
uh->source = uh->dest;
uh->dest = udp_port;
uh->check = 0;
#ifdef CONSTANT_RESPONSE_SIZE
// Assume a constant response packet and calculate the checksum
// (used to force response packets to be smaller than the request
// packets so we can reach peak 13 MRPS throughput with 16 Byte keys).
iph->tot_len = G_HTONS((RESPONSE_SIZE - sizeof(ether_header)));
uh->len = G_HTONS((RESPONSE_SIZE - sizeof(ether_header) - sizeof(ip_header)));
iph->check = wrapsum(in_cksum((unsigned char *)iph, 4*(iph->version & 0x0F), 0));
#else
// Calculate an initial partial checksum without the IP header length field.
// This will be added in afterwards
iph->check = partial_cksum((unsigned char *)iph, 4*(iph->version & 0x0F), 0);
#endif
// Copy in "VALUE "
#ifdef LATENCY_MEASURE
// If doing latency measurements, add the 8byte memc header before "VALUE " (Memc hdr used for a client timestamp)
mpi->nmch.mch.hdr[8] = VALUE[0];
mpi->nmch.mch.hdr[9] = VALUE[1];
mpi->nmch.mch.hdr[10] = VALUE[2];
mpi->nmch.mch.hdr[11] = VALUE[3];
mpi->nmch.mch.hdr[12] = VALUE[4];
mpi->nmch.mch.hdr[13] = VALUE[5];
#else
mpi->nmch.mch.hdr[0] = VALUE[0];
mpi->nmch.mch.hdr[1] = VALUE[1];
mpi->nmch.mch.hdr[2] = VALUE[2];
mpi->nmch.mch.hdr[3] = VALUE[3];
mpi->nmch.mch.hdr[4] = VALUE[4];
mpi->nmch.mch.hdr[5] = VALUE[5];
#endif
return;
}
__device__ void mod_populate_response(size_t *res_mem, mod_pkt_info *mpi, int local_tid, int logical_tid, int group_id, int *item_is_found, unsigned thread_type, int cta_id, _key_ *g_key){
int *res_ptr = NULL;
int *pkt_hdr_ptr = NULL;
int item_ptr_ind = cta_id*NUM_REQUESTS_PER_GROUP + logical_tid;
int req_ind = (int)(local_tid / WARP_SIZE); // Which warp this thread belongs to
req_ind *= NUM_REQ_PER_LOOP;
int w_tid = local_tid % WARP_SIZE;
int masked_ind = w_tid % THREAD_PER_HDR_COPY;
mod_pkt_info *m_mpi = &mpi[logical_tid];
pkt_res_memc_hdr *start_response_pkt_hdr_mem = (pkt_res_memc_hdr *)(res_mem + NUM_REQUESTS_PER_BATCH);
pkt_res_memc_hdr *response_pkt_hdr_mem = (pkt_res_memc_hdr *)&start_response_pkt_hdr_mem[cta_id*NUM_REQUESTS_PER_GROUP];
if(thread_type == 0){ // Thread_type 0 stores the found item pointers
//res_mem[item_ptr_ind] = (size_t)NULL;
if(item_is_found[logical_tid]){
res_mem[item_ptr_ind] = (size_t)mpi[logical_tid].it;
}else{
res_mem[item_ptr_ind] = (size_t)NULL;
}
}
#ifndef CONSTANT_RESPONSE_SIZE // If not using a constant response size, set the packet length fields and update checksum
else {
char *header = (char *)(&m_mpi->nmch);
ip_header *iph = (ip_header *)&header[14];
udp_header *uh = (udp_header *)&header[34];
// Update response packet lengths and compute IP checksum
iph->tot_len = G_HTONS((m_mpi->pkt_length - sizeof(ether_header)));
uh->len = G_HTONS((m_mpi->pkt_length - sizeof(ether_header) - sizeof(ip_header)));
// Already computed a partial checksum without the IP header length field.
// Add the updated length to the checksum.
iph->check = wrapsum(cksum_hdr_len_only((unsigned char *)iph, iph->check));
}
#endif
__syncthreads();
// Finally, store packet response headers from shared to global memory
for(unsigned i=0; i<NUM_REQ_PER_LOOP; ++i){
pkt_hdr_ptr = (int *)(&mpi[req_ind + i].nmch);
res_ptr = (int *)&response_pkt_hdr_mem[req_ind + i];
res_ptr[masked_ind] = pkt_hdr_ptr[masked_ind]; // This copies over the pkt hdr + "VALUE "
}
__syncthreads();
}
| b3daa0b9d1d02f21e2fea42335e3aa08f31b0d96.cu | // Copyright (c) 2015, Tayler Hetherington
// The University of British Columbia
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// Neither the name of The University of British Columbia nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
* cuda_gpu_nom_memcached.cu
*/
// Set Associative version of the hash table
// CUDA utilities and system includes
#ifndef __CUDA_VERSION__
//#define __
#endif
#include <cuda_runtime.h>
#include <host_defines.h>
#include <device_launch_parameters.h>
#include <stdio.h>
//#define DEBUG // Uncomment to enable some debugging
// If this is set, MemcachedGPU sends back the 8Byte Memcached header with the response
//#define LATENCY_MEASURE
// If this is set, the response packet is a constant size (RESPONSE_SIZE) independent from the Memcached packet
// The packet header/checksum can be computed earlier in parallel with the Memcached lookup.
//#define CONSTANT_RESPONSE_SIZE
#define RESPONSE_SIZE 72 //80 // 72 for peak throughput, 80 for latency test
#define USE_KEY_HASH
#define KEY_HASH_MASK 0x0000000FF
#define SET_ASSOC_SIZE 16
#define RESPONSE_HDR_STRIDE 256
#define NETWORK_PKT_SIZE 42
/*************************************/
#define REQUEST_GROUP_SIZE 128 // Don't change // Number of requests per group (subset of batch)
#define MAX_THREADS_PER_BLOCK 256 // Number of threads per request group
#define NUM_REQUESTS_PER_GROUP 256 // Do not change
/*************************************/
// This should be changed to match the number of requests per batch in GNoM_KM and GNoM_User
// (Should match NUM_REQUESTS_PER_BATCH in GNoM_km/gpu_km_shared.h)
#define NUM_REQUESTS_PER_BATCH 512 //256
#define NUM_THREADS_PER_GROUP NUM_REQUESTS_PER_GROUP*2 // NUM_REQUESTS_PER_BATCH*2
#define NUM_GROUPS NUM_REQUESTS_PER_BATCH / NUM_REQUESTS_PER_GROUP
/*************************************/
// Note: Currently the Tesla can have 2KB RX buffers, but the Maxwell
// requires smaller buffers due to the smaller memory
#define RX_BUFFER_SZ 2048
//#define RX_BUFFER_SZ 1024
#define UDP_PORT 9960
#define ETH_ALEN 6
#define IPPROTO_UDP 17
// Smaller max key size for testing.
#define MAX_KEY_SIZE 140 //250
#define UNLOCKED 0 // No lock set
#define SHARED_LOCK 1 // GET request(s) have the item locked
#define PRIVATE_LOCK 2 // SET request has the item locked. Only a single PRIVATE_LOCK can be obtained at a time.
#define G_HTONS(val) (u_int16_t) ((((u_int16_t)val >> 8) & 0x00FF ) | (((u_int16_t)val << 8) & 0xFF00) )
#define G_NTOHS(val) (G_HTONS(val))
#define G_HTONL(val) (u_int32_t) ( (((u_int32_t)val & 0xFF000000) >> 24 ) | \
(((u_int32_t)val & 0x00FF0000) >> 8 ) | \
(((u_int32_t)val & 0x0000FF00) << 8 ) | \
(((u_int32_t)val & 0x000000FF) << 24))
#define G_NTOHL(val) (G_HTONL(val))
#define hashsize(n) ((unsigned int)1<<(n))
#define hashmask(n) (hashsize(n)-1)
typedef unsigned int rel_time_t;
// Placeholder for Memcached item pointers
typedef void item;
typedef struct _ether_header{
u_int8_t ether_dhost[ETH_ALEN]; /* destination eth addr */
u_int8_t ether_shost[ETH_ALEN]; /* source ether addr */
u_int16_t ether_type; /* packet type ID field */
}ether_header;
typedef struct _ip_header {
u_int8_t version; /* version */ // Version+ihl = 8 bits, so replace ihl with 8bit version
//u_int32_t ihl:4; /* header length */
u_int8_t tos; /* type of service */
u_int16_t tot_len; /* total length */
u_int16_t id; /* identification */
u_int16_t frag_off; /* fragment offset field */
u_int8_t ttl; /* time to live */
u_int8_t protocol; /* protocol */
u_int16_t check; /* checksum */
u_int16_t saddr1; /* source and dest address */
u_int16_t saddr2;
u_int16_t daddr1;
u_int16_t daddr2;
}ip_header;
typedef struct _udp_header {
u_int16_t source; /* source port */
u_int16_t dest; /* destination port */
u_int16_t len; /* udp length */
u_int16_t check; /* udp checksum */
}udp_header;
typedef struct _memc_hdr_{
u_int8_t hdr[14]; // Only 8 Bytes, but padding an extra 4 bytes for memcpy purposes
}memc_hdr;
typedef struct _pkt_memc_hdr_{
ether_header eh;
ip_header iph;
udp_header udp;
memc_hdr mch;
}pkt_memc_hdr;
typedef struct _pkt_res_memc_hdr_{
ether_header eh;
ip_header iph;
udp_header udp;
char valstr_key[RESPONSE_HDR_STRIDE - NETWORK_PKT_SIZE];
}pkt_res_memc_hdr;
typedef struct _mod_pkt_info_{
item *it; // CPU VA pointer to found item
unsigned pkt_length; // Total length of response packet => Packet UDP header + "VALUE " + key + suffix + data (with "\r\n")
int hv; // Hash value
int is_get_req;
pkt_memc_hdr nmch; // Packet header + memc 8 Byte header
}mod_pkt_info;
typedef unsigned char uint8_t;
typedef struct _key_ {
unsigned key_len;
char key[MAX_KEY_SIZE];
} _key_;
// Forward declarations
__device__ int d_memcmp(const void *key1, const void *key2, int num){
const unsigned *p1 = (const unsigned* )key1;
const unsigned *p2 = (const unsigned* )key2;
int main_loop = num / sizeof(int);
int extra_loop = num % sizeof(int);
for(unsigned i=0; i<main_loop; i++){
unsigned diff = *(p1 + i) - *(p2 + i);
if( diff != 0){
return 0;
}
}
const char * p12 = ( const char * )key1;
const char * p22 = (const char*)key2;
for(unsigned i=main_loop*sizeof(int); i<extra_loop+main_loop*sizeof(int); i++){
unsigned char diff = *( p12 + i ) - *( p22 + i );
if( diff != 0){
return 0;
}
}
return 1;
}
// NOTE: This requires key lengths to be in increments 4 bytes
__device__ int fast_memcmp(const void *key1, const void *key2, int num){
const unsigned *p1 = (const unsigned* )key1;
const unsigned *p2 = (const unsigned* )key2;
int main_loop = num / sizeof(int);
for(unsigned i=0; i<main_loop; i++){
if(*(p1+i) != *(p2+i)){
return 0;
}
}
return 1;
}
// Compare char by char
__device__ int slow_memcmp(const char *key1, const char *key2, int num){
unsigned i=0;
int flag = 1;
for(i=0; i<num; i++){
if(key1[i] != key2[i]){
flag = 0;
break;
}
}
return flag;
}
/***********************************************/
/***********************************************/
// Bob Jenkin's hash from baseline Memcached
/***********************************************/
/***********************************************/
#define rot(x,k) (((x)<<(k)) ^ ((x)>>(32-(k))))
#define memcached_mix(a,b,c) \
{ \
a -= c; a ^= rot(c, 4); c += b; \
b -= a; b ^= rot(a, 6); a += c; \
c -= b; c ^= rot(b, 8); b += a; \
a -= c; a ^= rot(c,16); c += b; \
b -= a; b ^= rot(a,19); a += c; \
c -= b; c ^= rot(b, 4); b += a; \
}
#define final(a,b,c) \
{ \
c ^= b; c -= rot(b,14); \
a ^= c; a -= rot(c,11); \
b ^= a; b -= rot(a,25); \
c ^= b; c -= rot(b,16); \
a ^= c; a -= rot(c,4); \
b ^= a; b -= rot(a,14); \
c ^= b; c -= rot(b,24); \
}
__device__ unsigned int hash( char const * key, /* the key to hash */
size_t length, /* length of the key */
const unsigned int initval /* initval */){
unsigned int a,b,c; /* internal state */
union { const char *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
/* Set up the internal state */
a = b = c = 0xdeadbeef + ((unsigned int)length) + initval;
u.ptr = key;
if (((u.i & 0x3) == 0)) {
unsigned int const * k = ( unsigned int const *)key;
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
b += k[1];
c += k[2];
memcached_mix(a,b,c);
length -= 12;
k += 3;
}
switch(length)
{
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
case 8 : b+=k[1]; a+=k[0]; break;
case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
case 6 : b+=k[1]&0xffff; a+=k[0]; break;
case 5 : b+=k[1]&0xff; a+=k[0]; break;
case 4 : a+=k[0]; break;
case 3 : a+=k[0]&0xffffff; break;
case 2 : a+=k[0]&0xffff; break;
case 1 : a+=k[0]&0xff; break;
case 0 : return c; /* zero length strings require no mixing */
}
} else if (((u.i & 0x1) == 0)) {
unsigned short const * k = (unsigned short const *)key; /* read 16-bit chunks */
unsigned char const * k8;
/*--------------- all but last block: aligned reads and different mixing */
while (length > 12)
{
a += k[0] + (((unsigned int)k[1])<<16);
b += k[2] + (((unsigned int)k[3])<<16);
c += k[4] + (((unsigned int)k[5])<<16);
memcached_mix(a,b,c);
length -= 12;
k += 6;
}
/*----------------------------- handle the last (probably partial) block */
k8 = ( unsigned char const *)k;
switch(length)
{
case 12: c+=k[4]+(((unsigned int)k[5])<<16);
b+=k[2]+(((unsigned int)k[3])<<16);
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 11: c+=((unsigned int)k8[10])<<16; /* @fallthrough */
/* no break */
case 10: c+=k[4]; /* @fallthrough@ */
b+=k[2]+(((unsigned int)k[3])<<16);
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 9 : c+=k8[8]; /* @fallthrough */
case 8 : b+=k[2]+(((unsigned int)k[3])<<16);
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 7 : b+=((unsigned int)k8[6])<<16; /* @fallthrough */
case 6 : b+=k[2];
a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 5 : b+=k8[4]; /* @fallthrough */
case 4 : a+=k[0]+(((unsigned int)k[1])<<16);
break;
case 3 : a+=((unsigned int)k8[2])<<16; /* @fallthrough */
case 2 : a+=k[0];
break;
case 1 : a+=k8[0];
break;
case 0 : return c; /* zero length strings require no mixing */
}
} else { /* need to read the key one byte at a time */
unsigned char const * k = ( unsigned char const *)key;
/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
a += ((unsigned int)k[1])<<8;
a += ((unsigned int)k[2])<<16;
a += ((unsigned int)k[3])<<24;
b += k[4];
b += ((unsigned int)k[5])<<8;
b += ((unsigned int)k[6])<<16;
b += ((unsigned int)k[7])<<24;
c += k[8];
c += ((unsigned int)k[9])<<8;
c += ((unsigned int)k[10])<<16;
c += ((unsigned int)k[11])<<24;
memcached_mix(a,b,c);
length -= 12;
k += 12;
}
/*-------------------------------- last block: affect all 32 bits of (c) */
switch(length) /* all the case statements fall through */
{
case 12: c+=((unsigned int)k[11])<<24;
case 11: c+=((unsigned int)k[10])<<16;
case 10: c+=((unsigned int)k[9])<<8;
case 9 : c+=k[8];
case 8 : b+=((unsigned int)k[7])<<24;
case 7 : b+=((unsigned int)k[6])<<16;
case 6 : b+=((unsigned int)k[5])<<8;
case 5 : b+=k[4];
case 4 : a+=((unsigned int)k[3])<<24;
case 3 : a+=((unsigned int)k[2])<<16;
case 2 : a+=((unsigned int)k[1])<<8;
case 1 : a+=k[0];
break;
case 0 : return c; /* zero length strings require no mixing */
}
}
final(a,b,c);
return c; /* report the result */
}
/***********************************************/
/***********************************************/
// This checksum skips the ip_header length field, but adds up everything else.
// Later we can add in the length. Used to overlap independent computation to
// reduce processing latency
__device__ int partial_cksum(unsigned char *buf, unsigned nbytes, int sum) {
uint i;
/* Checksum all the pairs of bytes first... */
for (i = 0; i < (nbytes & ~1U); i += 2) {
if(i != 2){ // Bytes 2&3 are the IP header length field, skip it
sum += (u_int16_t) G_NTOHS(*((u_int16_t *)(buf + i)));
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
}
/* If there's a single byte left over, checksum it, too. Network
byte order is big-endian, so the remaining byte is the high byte. */
if(i < nbytes) {
sum += buf [i] << 8;
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
return sum;
}
// Only add up the ip header length once we know the response packet size
__device__ int cksum_hdr_len_only(unsigned char *buf, int sum){
sum += (u_int16_t) G_NTOHS(*((u_int16_t *)(buf + 2)));
if(sum > 0xFFFF)
sum -= 0xFFFF;
return sum;
}
// Full checksum
/*
* Checksum routine for Internet Protocol family headers (C Version)
*
* Borrowed from DHCPd
*/
__device__ int in_cksum(unsigned char *buf, unsigned nbytes, int sum) {
uint i;
/* Checksum all the pairs of bytes first... */
for (i = 0; i < (nbytes & ~1U); i += 2) {
sum += (u_int16_t) G_NTOHS(*((u_int16_t *)(buf + i)));
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
/* If there's a single byte left over, checksum it, too. Network
byte order is big-endian, so the remaining byte is the high byte. */
if(i < nbytes) {
sum += buf [i] << 8;
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
return sum;
}
/* ******************************************* */
__device__ int wrapsum (u_int32_t sum) {
sum = ~sum & 0xFFFF;
return G_NTOHS(sum);
}
/* ******************************************* */
typedef struct _gpu_primary_hashtable_{
void *item_ptr;
rel_time_t last_accessed_time;
unsigned valid;
#ifdef USE_KEY_HASH
unsigned key_hash; // 8-bit key hash - using 4 bytes to keep everything aligned
#endif
unsigned key_length;
unsigned pkt_length;
char key[MAX_KEY_SIZE];
}gpu_primary_hashtable;
typedef struct _gpu_set_req_{
void *item_ptr;
unsigned init_hv;
unsigned key_length;
unsigned pkt_length;
char key[MAX_KEY_SIZE];
}gpu_set_req;
typedef struct _gpu_set_res_{
int host_signal;
int is_evicted;
int is_last_get;
unsigned evicted_hv;
unsigned evicted_lru_timestamp;
void *evitcted_ptr;
}gpu_set_res;
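// Outcome codes written to is_evicted by memcached_SET_kernel below:
// 0 = stored into a free slot, 1 = evicted the set's LRU entry,
// 2 = overwrote an existing entry whose key matched (SET hit).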
// Forward declarations
__device__ void mod_parse_pkt( unsigned long long first_RX_buffer_ptr,
int local_tid,
int logical_tid,
int thread_type,
mod_pkt_info *mpi,
_key_ *g_key );
__device__ int mod_process_get_request( mod_pkt_info *mpi,
int hashpower,
rel_time_t time,
volatile gpu_primary_hashtable *g_primary_hashtable,
_key_ *g_key,
int *gpu_hash_lock );
__device__ void mod_create_response_header(mod_pkt_info *mpi, int helper_tid);
__device__ void mod_populate_response(size_t *res_mem, mod_pkt_info *mpi, int tid, int helper_tid, int group_id, int *item_is_found, unsigned thread_type, int cta_id, _key_ *g_key);
extern "C" __global__ void memcached_SET_kernel(int *req_mem,
int *res_mem,
int hashpower, // Memcached hashpower
unsigned int *gpu_hashtable, // GPU resident Memcached hashtable
int *gpu_hash_lock, // GPU resident locks for hashtable
rel_time_t timestamp){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int ret=0;
__shared__ unsigned hv;
__shared__ unsigned set_index;
__shared__ unsigned set_hv_index;
__shared__ unsigned insert_hv_index;
__shared__ unsigned key_hash_t;
__shared__ unsigned evict_lru_timestamp;
volatile gpu_primary_hashtable *g_primary_hashtable = (volatile gpu_primary_hashtable *)gpu_hashtable;
gpu_set_req *m_gph = (gpu_set_req *)req_mem;
gpu_set_res *m_gsr = (gpu_set_res *)res_mem;
volatile gpu_primary_hashtable *temp_gph;
volatile gpu_primary_hashtable *gph;
int oldest_item_hv = -1;
size_t oldest_item_time = 0xFFFFFFFFFFFFFFFF;
int free_found = 0;
int is_locked = 0;
int old_lock_val = 0;
unsigned num_sets = hashsize(hashpower) / SET_ASSOC_SIZE;
#ifdef DEBUG
unsigned hv = 0;
if(tid==0){
hv = hash(m_gph->key, m_gph->key_length, 0);
if(hv != m_gph->init_hv){
printf("HASH VALUES NOT EQUAL!!\n");
}
}
#endif
m_gsr->is_evicted = 0; // Initialize to 0 (free slot). May be changed below to 1 (LRU eviction) or 2 (SET hit on an existing key)
// Set Assoc Hashing - Search for a free spot within the set from init_hv
if(tid == 0){
hv = m_gph->init_hv; // Grab the hash value from the CPU calculation
set_index = hv % num_sets; // Calculate the set index for this hash value
set_hv_index = set_index*SET_ASSOC_SIZE; // Move to the correct location in the hash table for this set
key_hash_t = hv & KEY_HASH_MASK; // Calculate the 8-bit key hash
// Lock the current set
while(!is_locked){
old_lock_val = atomicCAS(&gpu_hash_lock[set_index], 0, -1);
if(old_lock_val == UNLOCKED){
is_locked = 1;
}
}
for(unsigned i=0; i<SET_ASSOC_SIZE; ++i){
temp_gph = (volatile gpu_primary_hashtable *)&g_primary_hashtable[set_hv_index + i]; // Index into the hashtable at this set
if(temp_gph->valid > 0){ // This hash location is already occupied, check the next location
// First check key hash. If equal, then do key comparison. Otherwise, no way they're equal.
if(temp_gph->key_hash == key_hash_t){
// If key hash matches, check complete key
ret = fast_memcmp((const void *)m_gph->key, (const void *)temp_gph->key, m_gph->key_length);
if(ret == 1){
// If matches, select this entry to overwrite. Set matching key-value pair to evict.
// This is required to ensure correct ordering on the CPU post processing
// Treat this the same as an LRU evict
oldest_item_time = temp_gph->last_accessed_time;
oldest_item_hv = (set_hv_index+i);
free_found = 0;
m_gsr->is_evicted = 2; // Set to SET hit
break;
}
}
// If no hit, update LRU status for this set
if((temp_gph->last_accessed_time < oldest_item_time) || (oldest_item_hv == -1)){
oldest_item_time = temp_gph->last_accessed_time;
oldest_item_hv = (set_hv_index+i);
}
}else{
// No need to search the whole set if an invalid entry is found
free_found = 1;
insert_hv_index = (set_hv_index + i);
break;
}
}
if(!free_found){
// Didn't find any free spots... Need to evict an item with the oldest timestamp within the set
insert_hv_index = oldest_item_hv;
evict_lru_timestamp = oldest_item_time;
if(m_gsr->is_evicted == 0){
m_gsr->is_evicted = 1;
}
}
}
__syncthreads();
__threadfence();
gph = (volatile gpu_primary_hashtable *)&g_primary_hashtable[insert_hv_index]; // Index into the hashtable
unsigned int *temp_key_src = (unsigned int *)m_gph->key;
unsigned int *temp_key_dst = (unsigned int *)gph->key;
// Block memory copy with all threads in the warp (max key size of 128 with this code)
if(tid < 32){
temp_key_dst[tid] = temp_key_src[tid]; // Copy the key over (Maybe overwriting previous key)
}
__syncthreads();
__threadfence();
if(tid == 0){
if(!free_found){
m_gsr->evicted_hv = oldest_item_hv;
m_gsr->evitcted_ptr = gph->item_ptr;
m_gsr->evicted_lru_timestamp = evict_lru_timestamp;
}
// Set 8-bit key hash
gph->key_hash = hv & KEY_HASH_MASK;
gph->item_ptr = m_gph->item_ptr;
gph->key_length = m_gph->key_length;
gph->pkt_length = m_gph->pkt_length;
// Record whether the last access was a SET or GET request
if(gph->valid == 1){
m_gsr->is_last_get = 0;
}else if(gph->valid == 2){
m_gsr->is_last_get = 1;
}
gph->valid = 1;
gph->last_accessed_time = (unsigned)timestamp;
#ifdef DEBUG
// DEBUG: Verify stored KEY matches
int ret = 0;
ret = d_memcmp((const void *)m_gph->key, (const void *)gph->key, m_gph->key_length);
if(ret != 1){
printf("KEYS NOT EQUAL!!\n");
}
#endif
gpu_hash_lock[set_index] = UNLOCKED; // Unlock the set
}
__threadfence_system();
/************************ End Critical Section ************************/
}
extern "C" __global__ void memcached_GET_kernel(unsigned long long first_req_addr, // Address of first CUDA buffer containing a valid packet
int num_req, // # of requests
int *response_mem, // Memory allocated for responses
int hashpower, // Memcached hashpower
unsigned int *gpu_hashtable, // GPU resident Memcached hashtable
int *gpu_hash_lock, // GPU resident locks for hashtable
rel_time_t timestamp){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int local_tid = threadIdx.x;
int thread_type = ((local_tid % MAX_THREADS_PER_BLOCK) < (MAX_THREADS_PER_BLOCK / 2)) ? 0 : 1;// 0 means actual request threads, 1 means helper threads
// This represents the request # that each thread will be responsible for. Request threads
// will be from 0->NUM_REQUESTS_PER_GROUP
// Each block handles NUM_REQUESTS_PER_GROUP (256) requests in two sub-groups of 128: 256 threads per 128 requests (128 request threads + 128 helper threads).
int group_id;
if(local_tid < MAX_THREADS_PER_BLOCK){
group_id = 0;
}else{
group_id = 1;
}
int half_group_size = MAX_THREADS_PER_BLOCK/2; // ==> 256/2 = 128
int logical_tid = -1;
if(thread_type == 0){
logical_tid = (group_id * half_group_size) + (tid % half_group_size); // First half of each sub-group: request threads
}else{
logical_tid = (group_id * half_group_size) + ( (tid-half_group_size) % half_group_size);
}
_key_ m_key; // Local key per thread
volatile gpu_primary_hashtable *g_primary_hashtable = (volatile gpu_primary_hashtable *)gpu_hashtable; // Global Memcached Hash Table
__shared__ mod_pkt_info mpi[NUM_REQUESTS_PER_GROUP];
__shared__ int item_is_found[NUM_REQUESTS_PER_GROUP];
m_key.key_len = 0;
// Address of first packet. All other packets are pkt#*RX_BUFFER_SZ away from first_req_addr
unsigned long long m_first_RX_buffer_addr = first_req_addr + (blockIdx.x * RX_BUFFER_SZ * NUM_REQUESTS_PER_GROUP);
mod_parse_pkt(m_first_RX_buffer_addr, local_tid, logical_tid, thread_type, mpi, &m_key);
__syncthreads();
__threadfence();
#ifdef DEBUG
if(mpi[logical_tid].is_get_req){
printf("GET:::tid: %d, local_tid: %d, thread_type: %d, group_id: %d, logical_tid: %d\n", tid, local_tid, thread_type, group_id, logical_tid);
}else{
printf("FAIL:::tid: %d, local_tid: %d, thread_type: %d, group_id: %d, logical_tid: %d\n", tid, local_tid, thread_type, group_id, logical_tid);
}
#endif
if(mpi[logical_tid].is_get_req){
if(thread_type == 0){
item_is_found[logical_tid] = mod_process_get_request(&mpi[logical_tid], hashpower, timestamp, g_primary_hashtable, &m_key, gpu_hash_lock);
}else{
mod_create_response_header(&mpi[logical_tid], logical_tid);
}
}
__syncthreads();
__threadfence();
mod_populate_response((size_t *)response_mem, mpi, local_tid, logical_tid, group_id, item_is_found, thread_type, blockIdx.x, &m_key);
__syncthreads();
__threadfence_system();
}
// TODO: The coalesced packet load is currently hardcoded to 256 requests per sub-group
// and 512 threads per group. This likely doesn't need to change, but it could be
// made configurable.
#define NUM_REQ_PER_LOOP 16
#define WARP_SIZE 32
#ifdef LATENCY_MEASURE
#define THREAD_PER_HDR_COPY 14 // 14 threads * 4 bytes = 56 bytes / hdr = 42 byte header + 8 byte memc hdr + "value "
#else
#define THREAD_PER_HDR_COPY 13 // 13 threads * 4 bytes = 52 bytes / hdr
#endif
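// Header-copy geometry: each warp (32 lanes) services NUM_REQ_PER_LOOP (16) requests; for each
// request, lane w copies the 4-byte word (w % THREAD_PER_HDR_COPY) of the packet header, covering
// 52 bytes (56 with LATENCY_MEASURE). Lanes 13-31 redundantly rewrite words already written,
// which is harmless because they copy identical data.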
__device__ void mod_parse_pkt( unsigned long long first_RX_buffer_ptr,
int local_tid,
int logical_tid,
int thread_type,
mod_pkt_info *mpi,
_key_ *g_key){
const char *GET = "get ";
int *req_ptr = NULL;
int *pkt_hdr_ptr = NULL;
char *pkt_hdr = NULL;
int ehs = sizeof(ether_header);
int ips = sizeof(ip_header);
int udps = sizeof(udp_header);
unsigned network_size = ehs + ips + udps;
ip_header *iph;
udp_header *udp;
char *payload;
char *key;
int count = 0;
u_int16_t check = 0;
int req_ind = (int)(local_tid / WARP_SIZE); // Which warp do you belong to?
req_ind *= NUM_REQ_PER_LOOP;
int w_tid = local_tid % WARP_SIZE;
int masked_ind = w_tid % THREAD_PER_HDR_COPY;
/**********************************************************/
// Load packet headers from global to shared memory *coalesced accesses*
// "LOAD PACKETS" stage from the SoCC paper.
for(unsigned i=0; i<NUM_REQ_PER_LOOP; ++i){
req_ptr = (int *)( first_RX_buffer_ptr + ((req_ind + i)*RX_BUFFER_SZ) );
pkt_hdr_ptr = (int *)(&mpi[req_ind + i].nmch);
pkt_hdr_ptr[masked_ind] = req_ptr[masked_ind];
}
__syncthreads();
// "PARSE UDP PACKET" stage from the SoCC paper
// The packet header contents are all in shared memory, now verify the packet contents (still in global mem)
mpi[logical_tid].is_get_req = 1; // Assume all are UDP Memcached GET requests
if(thread_type == 0){
pkt_hdr = (char *)&mpi[logical_tid].nmch;
iph = (ip_header *)(pkt_hdr + ehs);
udp = (udp_header *)(pkt_hdr + ehs + ips);
payload = (char *)(first_RX_buffer_ptr + (logical_tid*RX_BUFFER_SZ));
payload += (network_size+8);
if(G_NTOHS(udp->dest) != UDP_PORT){
mpi[logical_tid].is_get_req = 0;
#ifdef DEBUG
printf("UDP_PORT WRONG (%hu)\n", G_NTOHS(udp->dest));
#endif
}
// Verify Checksum
// Lower 4-bits of version is the ip_header length (ihl)
if(iph->check != 0){
check = wrapsum(in_cksum((unsigned char *)iph, (iph->version & 0x0F)<<2, 0));
if(check != 0){
mpi[logical_tid].is_get_req = 0;
}
}
if(mpi[logical_tid].is_get_req){
for(unsigned i=0; i<3; ++i){
if(payload[i] != GET[i]){
mpi[logical_tid].is_get_req = 0;
}
}
}
key = payload+4; // Move past "get "
if(mpi[logical_tid].is_get_req){
// key is guaranteed to be a minimum of 16 bytes, load in 16 bytes as shorts.
for(unsigned i=0; i<8; i++, count += 2){
((short *)(g_key->key))[i] = ((short *)(key))[i];
}
// Then load in the rest, searching for the end condition
while( (key[count] != '\r') || (key[count+1] != '\n') ){
g_key->key[count] = key[count];
count++;
}
// Check if key is too large
if(count >= MAX_KEY_SIZE){
mpi[logical_tid].is_get_req = 0;
}
}
// Set the key length
g_key->key_len = count;
}
}
// Actual Memcached hash + key lookup.
// "Network Service Processing" stage in the SoCC paper
__device__ int mod_process_get_request(mod_pkt_info *mpi, int hashpower, rel_time_t time,
volatile gpu_primary_hashtable *g_primary_hashtable,
_key_ *g_key,
int *gpu_hash_lock){
unsigned hv;
int ret = 0;
size_t nkey = g_key->key_len;
char *key = g_key->key;
volatile char *key_t;
unsigned key_hash_t;
volatile gpu_primary_hashtable *m_gph;
int is_locked = 0;
volatile int old_lock_val = -1;
volatile int new_lock_val = 0;
volatile int new_old_lock_val = 0;
unsigned set_index;
unsigned set_hv_index;
unsigned key_hash = 0;
// Compute the hash
hv = hash(key, nkey, 0);
key_hash = hv & KEY_HASH_MASK; // Compute the hash mask for this key
// Compute the set index for the hash and the corresponding index into the hash table
unsigned num_sets = hashsize(hashpower) / SET_ASSOC_SIZE;
set_index = hv % num_sets; // Calculate the set index for this hash value
set_hv_index = set_index*SET_ASSOC_SIZE; // Move to the correct location in the hash table for this set
// Soft mutex for each GET request. Multiple shared_locks, only single private_lock.
// Grab the shared lock for the set
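// Lock encoding (shared with memcached_SET_kernel): 0 = unlocked, -1 = exclusive SET lock,
// positive N = N GET requests currently holding the shared lock.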
while(!is_locked){
old_lock_val = gpu_hash_lock[set_index];
if(old_lock_val != -1){ // TEST
new_lock_val = old_lock_val+1;
new_old_lock_val = atomicCAS(&gpu_hash_lock[set_index], old_lock_val, new_lock_val); // and TEST and SET
if(new_old_lock_val == old_lock_val){
is_locked = 1;
}
}
}
// Set initial response length if item isn't found
mpi->pkt_length = RESPONSE_SIZE;
/************************ Critical Section ************************/
for(unsigned i=0; i<SET_ASSOC_SIZE; ++i){
m_gph = (volatile gpu_primary_hashtable *)&g_primary_hashtable[set_hv_index + i];
if(m_gph->valid > 0){
key_t = (volatile char *)m_gph->key;
// New - First check key hash. If equal, then do key comparison. Otherwise, no way they're equal.
key_hash_t = m_gph->key_hash;
if(key_hash == key_hash_t){
ret = fast_memcmp((const void *)key, (const void *)key_t, nkey);
if(ret){
mpi->it = (item *)m_gph->item_ptr; // Update response pointer
#ifndef CONSTANT_RESPONSE_SIZE
mpi->pkt_length = m_gph->pkt_length; // Update value length for response packet size
#endif
m_gph->last_accessed_time = time; // Possible Race Condition if multiple GETs updating this concurrently, but don't care who wins
m_gph->valid = 2; // Update hash table entry to say that last access was a GET request
break;
}
}
}
}
// Unlock the set
atomicSub(&gpu_hash_lock[set_index], 1);
/************************ End Critical Section ************************/
return ret;
}
__device__ void mod_create_response_header(mod_pkt_info *mpi, int helper_tid){
// m_res points to correct response memory for this helper_thread
// mpi contains unmodified packet header, modify in shared memory
// Elements to swap
u_int8_t ether_swap;
u_int16_t ip_addr1;
u_int16_t ip_addr2;
u_int16_t udp_port;
const char *VALUE = "VALUE ";
char *header = (char *)(&mpi->nmch);
ether_header *eh = (ether_header *)header;
ip_header *iph = (ip_header *)&header[14];
udp_header *uh = (udp_header *)&header[34];
// Swap ether
for(unsigned i=0; i<ETH_ALEN; ++i){
ether_swap = eh->ether_shost[i];
eh->ether_shost[i] = eh->ether_dhost[i];
eh->ether_dhost[i] = ether_swap;
}
// Swap IP
ip_addr1 = iph->saddr1;
ip_addr2 = iph->saddr2;
iph->saddr1 = iph->daddr1;
iph->saddr2 = iph->daddr2;
iph->daddr1 = ip_addr1;
iph->daddr2 = ip_addr2;
iph->check = 0;
// Swap UDP port
udp_port = uh->source;
uh->source = uh->dest;
uh->dest = udp_port;
uh->check = 0;
#ifdef CONSTANT_RESPONSE_SIZE
// Assume a constant response packet and calculate the checksum
// (used to force response packets to be smaller than the request
// packets so we can reach peak 13 MRPS throughput with 16 Byte keys).
iph->tot_len = G_HTONS((RESPONSE_SIZE - sizeof(ether_header)));
uh->len = G_HTONS((RESPONSE_SIZE - sizeof(ether_header) - sizeof(ip_header)));
iph->check = wrapsum(in_cksum((unsigned char *)iph, 4*(iph->version & 0x0F), 0));
#else
// Calculate an initial partial checksum without the IP header length field.
// This will be added in afterwards
iph->check = partial_cksum((unsigned char *)iph, 4*(iph->version & 0x0F), 0);
#endif
// Copy in "VALUE "
#ifdef LATENCY_MEASURE
// If doing latency measurements, add the 8byte memc header before "VALUE " (Memc hdr used for a client timestamp)
mpi->nmch.mch.hdr[8] = VALUE[0];
mpi->nmch.mch.hdr[9] = VALUE[1];
mpi->nmch.mch.hdr[10] = VALUE[2];
mpi->nmch.mch.hdr[11] = VALUE[3];
mpi->nmch.mch.hdr[12] = VALUE[4];
mpi->nmch.mch.hdr[13] = VALUE[5];
#else
mpi->nmch.mch.hdr[0] = VALUE[0];
mpi->nmch.mch.hdr[1] = VALUE[1];
mpi->nmch.mch.hdr[2] = VALUE[2];
mpi->nmch.mch.hdr[3] = VALUE[3];
mpi->nmch.mch.hdr[4] = VALUE[4];
mpi->nmch.mch.hdr[5] = VALUE[5];
#endif
return;
}
__device__ void mod_populate_response(size_t *res_mem, mod_pkt_info *mpi, int local_tid, int logical_tid, int group_id, int *item_is_found, unsigned thread_type, int cta_id, _key_ *g_key){
int *res_ptr = NULL;
int *pkt_hdr_ptr = NULL;
int item_ptr_ind = cta_id*NUM_REQUESTS_PER_GROUP + logical_tid;
int req_ind = (int)(local_tid / WARP_SIZE); // Which warp this thread belongs to
req_ind *= NUM_REQ_PER_LOOP;
int w_tid = local_tid % WARP_SIZE;
int masked_ind = w_tid % THREAD_PER_HDR_COPY;
mod_pkt_info *m_mpi = &mpi[logical_tid];
pkt_res_memc_hdr *start_response_pkt_hdr_mem = (pkt_res_memc_hdr *)(res_mem + NUM_REQUESTS_PER_BATCH);
pkt_res_memc_hdr *response_pkt_hdr_mem = (pkt_res_memc_hdr *)&start_response_pkt_hdr_mem[cta_id*NUM_REQUESTS_PER_GROUP];
if(thread_type == 0){ // Thread_type 0 stores the found item pointers
//res_mem[item_ptr_ind] = (size_t)NULL;
if(item_is_found[logical_tid]){
res_mem[item_ptr_ind] = (size_t)mpi[logical_tid].it;
}else{
res_mem[item_ptr_ind] = (size_t)NULL;
}
}
#ifndef CONSTANT_RESPONSE_SIZE // If not using a constant response size, set the packet length fields and update checksum
else {
char *header = (char *)(&m_mpi->nmch);
ip_header *iph = (ip_header *)&header[14];
udp_header *uh = (udp_header *)&header[34];
// Update response packet lengths and compute IP checksum
iph->tot_len = G_HTONS((m_mpi->pkt_length - sizeof(ether_header)));
uh->len = G_HTONS((m_mpi->pkt_length - sizeof(ether_header) - sizeof(ip_header)));
// Already computed a partial checksum without the IP header length field.
// Add the updated length to the checksum.
iph->check = wrapsum(cksum_hdr_len_only((unsigned char *)iph, iph->check));
}
#endif
__syncthreads();
// Finally, store packet response headers from shared to global memory
for(unsigned i=0; i<NUM_REQ_PER_LOOP; ++i){
pkt_hdr_ptr = (int *)(&mpi[req_ind + i].nmch);
res_ptr = (int *)&response_pkt_hdr_mem[req_ind + i];
res_ptr[masked_ind] = pkt_hdr_ptr[masked_ind]; // This copies over the pkt hdr + "VALUE "
}
__syncthreads();
}
|
4694098e8f4bd93820a84f0f9fb7eab5c7adcb9f.hip | // !!! This is a file automatically generated by hipify!!!
//
// Copyright :
// Don't use this in commercial code unless you talk to me and I agree after stipulations.
//
// Description :
// Iterative Two-Opt solver.
//
// Author :
// Name : Jeffrey A Robinson
// Email : [email protected]
//
//
// C++
#include <iostream>
#include <string>
// C
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
// CUDA
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
// Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC)
#pragma warning(push,4)
// data structures
enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS};
// Data structure used to hold position along path
struct __align__(8) Data {
float x,y;
};
// If not 0 then use Shared Memory Structure to hold x,y and w values; otherwise, each component is held in own array.
#define S_DATA 0
#if S_DATA == 1
// Data-structue for shared memory
struct __align__(8) S_Data {
int w;
float x,y;
};
#endif
// Global stats
static __device__ __managed__ int climbs_d = 0;
static __device__ __managed__ int best_d = INT_MAX;
static __device__ int restart_d = 0;
// Buffer space
#if S_DATA
extern __shared__ S_Data buffer[];
#else
extern __shared__ char buffer[];
__shared__ float *x_buffer;
__shared__ float *y_buffer;
__shared__ int *w_buffer;
#endif
// Wrappers for the shared memory buffer(s)
static __device__ inline void sAtomicMinW(const int &index, const int &v) {
#if S_DATA
atomicMin(&buffer[index].w,v);
#else
atomicMin(w_buffer+index,v);
#endif
}
#if S_DATA
#define sX(index,v) buffer[index].x = v
#define sY(index,v) buffer[index].y = v
#define sW(index,v) buffer[index].w = v
#define gX(index) buffer[index].x
#define gY(index) buffer[index].y
#define gW(index) buffer[index].w
#else
#define sX(index,v) x_buffer[index] = v
#define sY(index,v) y_buffer[index] = v
#define sW(index,v) w_buffer[index] = v
#define gX(index) x_buffer[index]
#define gY(index) y_buffer[index]
#define gW(index) w_buffer[index]
#endif
//
// Description :
// Give two points returns the distance between them
//
// @x1 - X value of the first point
// @y1 - Y value of the first point
// @x2 - X value of the second point
// @y2 - Y value of the second point
//
// @return - Returns the distance between the two points given
static __device__ inline float
dist(float x1, float y1, float x2, float y2) {
float x = x1-x2;
float y = y1-y2; y *= y;
return __float2int_rn(sqrtf(x*x + y));
}
//
// POSSIBLE IDEA :
// We could reduce the atomicAdd by letting each thread do their own work.
// But when they run out then try to grab other blocks remaining work.
//
// Description :
// Returns a unique integer value with the initial value being 0
//
// @return - Returns the next unique integer
static __device__ inline int
nextInt() {
if(threadIdx.x==0) {
sW(0,atomicAdd(&restart_d,1));
}__syncthreads();
return gW(0);
}
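// ---------------------------------------------------------------------------
// Sketch of the "POSSIBLE IDEA" above (illustration only, not used by the
// solver): hand every block a private chunk of restart indices and fall back
// to the global atomic counter only once the chunk is exhausted. The function
// name and the block_next parameter are hypothetical; the caller would keep
// block_next in shared memory initialized to blockIdx.x * ChunkSize, and the
// host would start restart_d at gridDim.x * ChunkSize.
// ---------------------------------------------------------------------------
static __device__ inline int
nextIntChunked(int *block_next, const int ChunkSize) {
	if(threadIdx.x==0) {
		const int chunk_end = (blockIdx.x + 1) * ChunkSize;
		int mine = *block_next;
		if(mine < chunk_end) {
			*block_next = mine + 1;            // still inside this block's chunk: no global atomic
			sW(0,mine);
		}else {
			sW(0,atomicAdd(&restart_d,1));     // chunk exhausted: fall back to the shared pool
		}
	}__syncthreads();
	return gW(0);
}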
//
// Description :
// Allocates and initializes my global memory and shared memory.
//
// @pos - An array that needs to be initialized and will hold our path points
// @weight - An array that needs to be initialized and will hold our edge weights
// @cities - The amount of points in our graph
//
// @return - Returns true if initialization was successful, false otherwise.
template <int TileSize>
static inline __device__ bool
initMemory(const Data* &pos_d, Data* &pos, int * &weight, const int cities) {
__shared__ Data *d;
__shared__ int *w;
// Allocate my global memory
if(threadIdx.x == 0 ) {
d = new Data[cities + 1];
if(d != NULL) {
w = new int[cities];
if(w == NULL) {
delete[] d;
d = NULL;
}
}
}__syncthreads();
if(d == NULL) {
return false;
}
// Save new memory locations
pos = d;
weight = w;
for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i];
__syncthreads();
#if S_DATA == 0
// Initialize shared memory
x_buffer = (float*)buffer;
y_buffer = (float*)(buffer + sizeof(float) * TileSize);
w_buffer = (int*)(buffer + 2 * sizeof(float) * TileSize);
#endif
return true;
}
//
// Description :
// Each thread gives some integer value, then the "best" of them is returned.
//
// @t_val - The number that the thread submits as a candidate for the maximum value
// @cities - The number of cities.
//
// @return - The best value of t_val seen from all threads
template <int Reductions,ThreadBufferStatus Status, int TileSize>
static inline __device__ int
maximum(int t_val, const int cities) {
int upper = min(blockDim.x,min(TileSize,cities));
if(Status == MORE_THREADS_THAN_BUFFER) {
int Index = threadIdx.x % TileSize;
w_buffer[Index] = t_val;
__syncthreads();
for(int i = 0 ; i <= (blockDim.x/TileSize); ++i ) {
w_buffer[Index] = t_val = min(t_val,w_buffer[Index]);
}
}else {
w_buffer[threadIdx.x] = t_val;
}__syncthreads();
// 1024
if (TileSize > 512) {
int offset = (upper + 1) / 2; // 200
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 512
if (TileSize > 256) {
int offset = (upper + 1) / 2; // 100
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 256
if (TileSize > 128) {
int offset = (upper + 1) / 2; // 50
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 128
if (TileSize > 64) {
int offset = (upper + 1) / 2; // 25
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 64 and down
if(threadIdx.x < 32) {
if(TileSize > 32) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+(upper+1)/2]);
}
if(threadIdx.x < 16) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+16]);
}
if(threadIdx.x < 8) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+8]);
}
if(threadIdx.x < 4) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+4]);
}
if(threadIdx.x < 2) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+2]);
}
if(threadIdx.x < 1) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+1]);
}
}__syncthreads();
return w_buffer[0];
}
//
// Description :
// After we find the best four positions to reconnect, we need to
// reverse the path between them.
//
// @start - The first position in the sub-path we have to swap with the end
// @end - The last position in the path we have to swap with the start
// @pos - The positions in our path
// @weights - The edge weights between points
static inline __device__ void
reverse(int start, int end, Data* &pos, int* &weight) {
while(start<end) {
int w = weight[start];
Data d = pos[start];
weight[start] = weight[end-1];
pos[start] = pos[end];
weight[end-1] = w;
pos[end] = d;
start += blockDim.x;
end -= blockDim.x;
}__syncthreads();
}
//
// Description :
// Perform a single iteration of Two-Opt.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @minchange - The current best change we can make
// @mini - The ith city in the path that is part of the swap
// @minj - The jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
template <ThreadBufferStatus Status,int TileSize>
static __device__ void
singleIter(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int cities) {
for (int ii = 0; ii < cities - 2; ii += blockDim.x) {
int i = ii + threadIdx.x;
float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1;
if (i < cities - 2) {
minchange -= weight[i];
pxi0 = pos[i].x;
pyi0 = pos[i].y;
pxi1 = pos[i+1].x;
pyi1 = pos[i+1].y;
pxj1 = pos[0].x;
pyj1 = pos[0].y;
}
for (int jj = cities - 1; jj >= ii + 2; jj -= TileSize) {
int bound = jj - TileSize + 1;
if(Status==MORE_BUFFER_THAN_THREADS) {
for(int k = threadIdx.x; k < TileSize; k += blockDim.x) {
int index = k + bound;
if (index >= (ii + 2)) {
sX(k,pos[index].x);
sY(k,pos[index].y);
sW(k,weight[index]);
}
}
}else {
if(threadIdx.x < TileSize) {
int index = threadIdx.x + bound;
if (index >= (ii + 2)) {
sX(threadIdx.x,pos[index].x);
sY(threadIdx.x,pos[index].y);
sW(threadIdx.x,weight[index]);
}
}
}__syncthreads();
int lower = bound;
if (lower < i + 2) lower = i + 2;
for (int j = jj; j >= lower; j--) {
int jm = j - bound;
float pxj0 = gX(jm);
float pyj0 = gY(jm);
int change = gW(jm)
+ dist(pxi0,pyi0,pxj0,pyj0)
+ dist(pxi1,pyi1,pxj1,pyj1);
pxj1 = pxj0;
pyj1 = pyj0;
if (minchange > change) {
minchange = change;
mini = i;
minj = j;
}
}__syncthreads();
}
if (i < cities - 2) {
minchange += weight[i];
}
}
}
//
// Description :
// Perform the swaps to the edges i and j to decrease the total length of our
// path and update the weight and pos arrays appropriately.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @minchange - The current best change we can make
// @mini - The ith city in the path that is part of the swap
// @minj - The jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
template <int Reductions, ThreadBufferStatus Status, int TileSize>
static __device__ bool
update(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int cities) {
//__shared__ int winner;winner = blockDim.x;
if( maximum<Reductions,Status,TileSize>(minchange, cities) >= 0) return false;
if(minchange == gW(0)) {
sW(1,threadIdx.x);
}__syncthreads();
if(gW(1) == threadIdx.x) {
sW(2,mini);
sW(3,minj);
}__syncthreads();
mini = gW(2);
minj = gW(3);
// Fix path and weights
reverse(mini+1+threadIdx.x,minj-threadIdx.x,pos,weight);
// Fix connecting points
weight[mini] = -dist(pos[mini].x,pos[mini].y,pos[mini+1].x,pos[mini+1].y);
weight[minj] = -dist(pos[minj].x,pos[minj].y,pos[minj+1].x,pos[minj+1].y);
__syncthreads();
return true;
}
//
// Description :
// Given a path we randomly permute it into a new path and then initialize
// the weights of the path.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @cities - The number of cities along the path (excluding the end point)
static __device__ inline void
permute(Data* &pos, int* &weight, const int cities) {
if (threadIdx.x == 0) { // serial permutation
hiprandState_t rndstate;
hiprand_init(blockIdx.x, 0, 0, &rndstate);
for (int i = 1; i < cities; i++) {
int j = hiprand(&rndstate) % (cities - 1) + 1;
Data d = pos[i];
pos[i] = pos[j];
pos[j] = d;
}
pos[cities] = pos[0];
}__syncthreads();
for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(pos[i].x, pos[i].y, pos[i+1].x, pos[i+1].y);
__syncthreads();
}
//
// Releases memory and saves results
//
// @pos - Pointer to allocated path memory
// @weight - Pointer to allocated edge weight memory
// @local_climbs - The number of climbs performed by this block
// @best_length - The best length this block found.
static __device__ void inline
cleanup(Data* &pos, int* &weight, int &local_climbs, int &best_length) {
if (threadIdx.x == 0) {
// Save data
atomicAdd(&climbs_d,local_climbs);
atomicMin(&best_d, best_length);
// Release memory
delete[] pos;
delete[] weight;
}
}
//
// Description :
// Perform iterative two-opt until there can be no more swaps to reduce
// the path length.
//
// @pos_d - The position of each point in the graph.
// @cities - The number of vertices in the graph
template <int Reductions,ThreadBufferStatus Status, int TileSize>
static __global__ __launch_bounds__(1024, 2) void
TwoOpt(const int Restarts, const Data *pos_d, const int cities) {
Data *pos;
int *weight;
int local_climbs = 0;
int best_length = INT_MAX;
if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) {
if(threadIdx.x == 0) {
printf("Memory initialization error for block %d\n", blockIdx.x);
}
return;
}
for(int r = nextInt() ; r < Restarts; r = nextInt()) {
int mini,minj,minchange;
permute(pos,weight,cities);
do {
++local_climbs; // Global statistics
minchange = mini = minj = 0; // Reset
singleIter<Status,TileSize>(pos, weight, minchange, mini, minj, cities);
} while (update<Reductions,Status,TileSize>(pos, weight, minchange, mini, minj, cities));
__shared__ int w; w = 0;
__syncthreads();
int term = 0;
for (int i = threadIdx.x; i < cities; i += blockDim.x) {
term += dist(pos[i].x, pos[i].y, pos[i+1].x, pos[i+1].y);
}
atomicAdd(&w,term);
__syncthreads();
if(threadIdx.x==0) {
if(w < best_length) {
best_length = w;
}
}
}
cleanup(pos, weight, local_climbs, best_length);
}
//
// Description :
// Checks to see if an error occurred with CUDA and, if so, prints out the
// message passed and the CUDA error, then quits the application.
//
// @msg - Message to print out if error occurs
static void
CudaTest(const char *msg) {
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
system("PAUSE");
exit(-1);
}
}
// Terrible (TODO: Turn into functions)
#define mallocOnGPU(addr, size) if (hipSuccess != hipMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory");
#define copyToGPU(to, from, size) if (hipSuccess != hipMemcpy(to, from, size, hipMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed");
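// A possible shape for the TODO above (illustration only, not wired into the
// code below): the same checks expressed as functions, so each call is a
// single statement instead of a two-statement macro. The names are
// hypothetical.
static void* mallocOnGPUChecked(size_t size) {
	void *addr = NULL;
	if (hipSuccess != hipMalloc(&addr, size)) fprintf(stderr, "could not allocate GPU memory\n");
	CudaTest("couldn't allocate GPU memory");
	return addr;
}
static void copyToGPUChecked(void *to, const void *from, size_t size) {
	if (hipSuccess != hipMemcpy(to, from, size, hipMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n");
	CudaTest("data copy to device failed");
}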
//
// Description :
// Read TSPLIB files into GPU memory. ATT and CEIL_2D edge weight types are
// not supported
//
// @fname - The name of the file to read the TSP data from
// @pos_d - Pointer to the pointer that will hold data on GPU
// and is modified here to be the address on the GPU
//
// @return - Returns the number of cities found
static int
readInput(const char *fname, Data **pos_d) {
int ch, cnt, in1, cities;
float in2, in3;
FILE *f;
Data *pos;
char str[256]; // potential for buffer overrun
f = fopen(fname, "rt");
if (f == NULL) {fprintf(stderr, "could not open file %s\n", fname); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
fscanf(f, "%s\n", str);
cities = atoi(str);
if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
fscanf(f, "%s\n", str);
if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
cnt = 0;
while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3)) {
pos[cnt].x = in2;
pos[cnt].y = in3;
++cnt;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
fscanf(f, "%s", str);
if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
mallocOnGPU(*pos_d, sizeof(Data) * cities);
copyToGPU(*pos_d, pos, sizeof(Data) * cities);
fclose(f);
delete[] pos;
return cities;
}
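// For reference, a minimal input that the parser above accepts (hypothetical
// data, TSPLIB-style): three header lines that are skipped, a line whose value
// after ':' is the dimension, one more skipped line, NODE_COORD_SECTION, the
// 1-indexed coordinates, and a closing EOF token.
//
//   NAME : toy5
//   COMMENT : five hand-made points
//   TYPE : TSP
//   DIMENSION : 5
//   EDGE_WEIGHT_TYPE : EUC_2D
//   NODE_COORD_SECTION
//   1 0.0 0.0
//   2 10.0 0.0
//   3 10.0 10.0
//   4 0.0 10.0
//   5 5.0 5.0
//   EOF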
//
// Description :
// Given an enum value, return its string representation
//
// @status - The enum value to translate
//
// @return - The enums string representation in the source code
static const std::string
getName(const ThreadBufferStatus status) {
switch(status) {
case MORE_THREADS_THAN_BUFFER:
return std::string("MORE_THREADS_THAN_BUFFER");
case EQUAL_SIZE:
return std::string("EQUAL_SIZE");
case MORE_BUFFER_THAN_THREADS:
return std::string("MORE_BUFFER_THAN_THREADS");
};
return std::string("enum value not found.");
}
//
// Description :
// Given an integer returns the next multiple of 32 greater than or equal to it.
//
// @in - The integer to round to next multiple of 32
//
// @return - Returns the next multiple of 32 that is greater than or equal to in
static int
next32(int in) {
return ((in + 31) / 32 ) * 32;
}
//
// Description :
// How many reductions do we need to perform in order to make sure we have found
// our min/max/etc
//
// @return returns the number of reductions needed to propagate any value
static int
computeReductions(const int Cities, const int Threads, const int TileSize) {
int MaxData = min(Threads,min(TileSize,Cities));
if(MaxData>512) return 10;
if(MaxData>256) return 9;
if(MaxData>128) return 8;
if(MaxData>64) return 7;
if(MaxData>32) return 6;
return 5;
}
//
// Description :
// Calculates the maximum number of resident blocks that the card can hold
//
// @Threads - Number of threads that each block will have
// @Shared_Bytes - The amount of bytes each block will allocate
//
// @return - Returns the number of blocks the card can have resident
static int
getMaxBlocks(const int Shared_Bytes, const int Threads) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props,0);
if(props.major < 3) {
const int Max_Shared = 16384;
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
return props.multiProcessorCount * min(8,min(Block_Shared_Limit,(int)(2048/Threads)));
}else if(props.major < 5) {
const int Max_Shared = 32768;
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
return props.multiProcessorCount * min(16,min(Block_Shared_Limit,(int)(2048/Threads)));
}else {
const int Max_Shared = 65536;
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
return props.multiProcessorCount * min(32,min(Block_Shared_Limit,(int)(2048/Threads)));
}
}
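// Worked example for the middle branch above (illustration only): with
// Threads = 1024 and TileSize = 1024, Shared_Bytes = (4 + 2*4) * 1024 = 12288,
// so Block_Shared_Limit = 32768 / 12288 = 2 and 2048 / Threads = 2, giving
// min(16, min(2, 2)) = 2 resident blocks per SM, which is consistent with the
// __launch_bounds__(1024, 2) annotation on the TwoOpt kernel.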
//
// private : Handle ThreadBufferStatus kernel selection
//
template <int Reductions,int TileSize>
static float
_wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) {
float gpuExecutionTime;
// A quick way to template out some checks. If more threads than buffer then I have to atomicMin to a single slot.
const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? MORE_BUFFER_THAN_THREADS : EQUAL_SIZE;
// Amount of shared memory in Bytes
#if S_DATA
const int Shared_Bytes = sizeof(S_Data) * TileSize;
#else
const int Shared_Bytes = (sizeof(int) + 2 * sizeof(float)) * TileSize;
#endif
// Calculate number of maximum number of resident blocks allowed on the card
const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes,Threads));
// Output runtime configuration
std::cout << "Blocks = " << Blocks
<< ", Threads = " << Threads
<< ", TileSize = " << TileSize
<< ", Status = " << getName(Status)
<< ", Reductions = " << Reductions
<< ", Shared Bytes = " << Shared_Bytes << std::endl;
hipEvent_t begin,end;
hipEventCreate(&begin);
hipEventCreate(&end);
switch(Status) {
case MORE_THREADS_THAN_BUFFER:
hipEventRecord(begin,0);
hipLaunchKernelGGL(( TwoOpt<Reductions,MORE_THREADS_THAN_BUFFER,TileSize>), dim3(Blocks),dim3(Threads),Shared_Bytes, 0, Restarts,Pos_d,Cities);
hipEventRecord(end,0);
hipEventSynchronize(end);
break;
case EQUAL_SIZE:
hipEventRecord(begin,0);
hipLaunchKernelGGL(( TwoOpt<Reductions,EQUAL_SIZE,TileSize>), dim3(Blocks),dim3(Threads),Shared_Bytes, 0, Restarts,Pos_d,Cities);
hipEventRecord(end,0);
hipEventSynchronize(end);
break;
case MORE_BUFFER_THAN_THREADS:
hipEventRecord(begin,0);
hipLaunchKernelGGL(( TwoOpt<Reductions,MORE_BUFFER_THAN_THREADS,TileSize>), dim3(Blocks),dim3(Threads),Shared_Bytes, 0, Restarts,Pos_d,Cities);
hipEventRecord(end,0);
hipEventSynchronize(end);
break;
};
hipEventElapsedTime(&gpuExecutionTime,begin,end);
hipEventDestroy(begin);
hipEventDestroy(end);
return gpuExecutionTime;
}
//
// private : Handle Reduction kernel selection
//
template <int TileSize>
static float
_wrapReduction(const int Restarts, const int Threads, const Data *Pos, const int Cities) {
const int Reductions = computeReductions(Cities,Threads,TileSize);
switch(Reductions) {
case 10:
return _wrapStatus<10,TileSize>(Restarts, Threads, Pos, Cities);
case 9:
return _wrapStatus<9,TileSize>(Restarts, Threads, Pos, Cities);
case 8:
return _wrapStatus<8,TileSize>(Restarts, Threads, Pos, Cities);
case 7:
return _wrapStatus<7,TileSize>(Restarts, Threads, Pos, Cities);
case 6:
return _wrapStatus<6,TileSize>(Restarts, Threads, Pos, Cities);
default:
return _wrapStatus<5,TileSize>(Restarts, Threads, Pos, Cities);
}
}
//
// Description :
// Given these parameters we construct and start a CUDA kernel.
//
// @Cities - Number of cities or nodes in the graph
// @Pos - Position data of graph nodes.
// @Restarts - How many different random permutations of the input cities should be tried
// @Threads - The number of threads each block should have
// @TileSize - The shared buffer size for our sliding tile.
//
// @return - Returns the duration of the kernel in milliseconds.
static float
RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) {
switch(TileSize) {
case 32:
return _wrapReduction<32>(Restarts,Threads,Pos,Cities);
case 64:
return _wrapReduction<64>(Restarts,Threads,Pos,Cities);
case 96:
return _wrapReduction<96>(Restarts,Threads,Pos,Cities);
case 128:
return _wrapReduction<128>(Restarts,Threads,Pos,Cities);
case 160:
return _wrapReduction<160>(Restarts,Threads,Pos,Cities);
case 192:
return _wrapReduction<192>(Restarts,Threads,Pos,Cities);
case 224:
return _wrapReduction<224>(Restarts,Threads,Pos,Cities);
case 256:
return _wrapReduction<256>(Restarts,Threads,Pos,Cities);
case 288:
return _wrapReduction<288>(Restarts,Threads,Pos,Cities);
case 320:
return _wrapReduction<320>(Restarts,Threads,Pos,Cities);
case 352:
return _wrapReduction<352>(Restarts,Threads,Pos,Cities);
case 384:
return _wrapReduction<384>(Restarts,Threads,Pos,Cities);
case 416:
return _wrapReduction<416>(Restarts,Threads,Pos,Cities);
case 448:
return _wrapReduction<448>(Restarts,Threads,Pos,Cities);
case 480:
return _wrapReduction<480>(Restarts,Threads,Pos,Cities);
case 512:
return _wrapReduction<512>(Restarts,Threads,Pos,Cities);
case 544:
return _wrapReduction<544>(Restarts,Threads,Pos,Cities);
case 576:
return _wrapReduction<576>(Restarts,Threads,Pos,Cities);
case 608:
return _wrapReduction<608>(Restarts,Threads,Pos,Cities);
case 640:
return _wrapReduction<640>(Restarts,Threads,Pos,Cities);
case 672:
return _wrapReduction<672>(Restarts,Threads,Pos,Cities);
case 704:
return _wrapReduction<704>(Restarts,Threads,Pos,Cities);
case 736:
return _wrapReduction<736>(Restarts,Threads,Pos,Cities);
case 768:
return _wrapReduction<768>(Restarts,Threads,Pos,Cities);
case 800:
return _wrapReduction<800>(Restarts,Threads,Pos,Cities);
case 832:
return _wrapReduction<832>(Restarts,Threads,Pos,Cities);
case 864:
return _wrapReduction<864>(Restarts,Threads,Pos,Cities);
case 896:
return _wrapReduction<896>(Restarts,Threads,Pos,Cities);
case 928:
return _wrapReduction<928>(Restarts,Threads,Pos,Cities);
case 960:
return _wrapReduction<960>(Restarts,Threads,Pos,Cities);
case 992:
return _wrapReduction<992>(Restarts,Threads,Pos,Cities);
case 1024:
return _wrapReduction<1024>(Restarts,Threads,Pos,Cities);
default:
std::cout << "Invalid TileSize = " << TileSize << std::endl;
exit(-1);
};
return -1;
}
//
// Description :
// Main entry point to our iterative Two-Opt solver.
// Options are ./<name> problem_file restarts <threads> <buffer_size>
//
// @argc - Number of command line parameters (including program name)
// @argv - Holds command line arguments
//
// @return - Returns 0 if success, otherwise failure.
int
main(int argc, char *argv[]) {
printf("2-opt TSP CUDA GPU code v2.1 [Kepler]\n");
printf("Copyright (c) 2014, Texas State University. All rights reserved.\n");
if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);}
const int Restarts = atoi(argv[2]);
if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);}
Data *pos_d;
const int Cities = readInput(argv[1], &pos_d);
printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]);
const int Threads = min(1024,(argc >= 4) ? next32(atoi(argv[3])) : next32(Cities));
const int TileSize = min(1024,(argc >= 5) ? next32(atoi(argv[4])) : Threads);
const float GpuExecutionTime = RunKernel(Cities,pos_d,Restarts,Threads,TileSize);
int hours = (int)(GpuExecutionTime / (3600.0f * 1000.0f));
int seconds = (int)(GpuExecutionTime/1000) % 60;
int minutes = (int)(GpuExecutionTime/1000) / 60;
long long moves = 1LL * climbs_d * (Cities - 2) * (Cities - 1) / 2;
std::cout << moves * 0.000001 / GpuExecutionTime << "Gmoves/s" << std::endl;
std::cout << "best found tour length = " << best_d << std::endl;
std::cout << "Total Time : " << GpuExecutionTime / 1000.0f << "s" << std::endl;
std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(GpuExecutionTime) % 1000 << std::endl;
hipFree(pos_d);
hipDeviceReset();
return 0;
}
| 4694098e8f4bd93820a84f0f9fb7eab5c7adcb9f.cu | //
// Copyright :
// Don't use this in commercial code unless you talk to me and I agree after stipulations.
//
// Description :
// Iterative Two-Opt solver.
//
// Author :
// Name : Jeffrey A Robinson
// Email : [email protected]
//
//
// C++
#include <iostream>
#include <string>
// C
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
// CUDA
#include <cuda.h>
#include <curand_kernel.h>
// Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC)
#pragma warning(push,4)
// data structures
enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS};
// Data structure used to hold position along path
struct __align__(8) Data {
float x,y;
};
// If not 0 then use Shared Memory Structure to hold x,y and w values; otherwise, each component is held in own array.
#define S_DATA 0
#if S_DATA == 1
// Data-structue for shared memory
struct __align__(8) S_Data {
int w;
float x,y;
};
#endif
// Global stats
static __device__ __managed__ int climbs_d = 0;
static __device__ __managed__ int best_d = INT_MAX;
static __device__ int restart_d = 0;
// Buffer space
#if S_DATA
extern __shared__ S_Data buffer[];
#else
extern __shared__ char buffer[];
__shared__ float *x_buffer;
__shared__ float *y_buffer;
__shared__ int *w_buffer;
#endif
// Wrappers for the shared memory buffer(s)
static __device__ inline void sAtomicMinW(const int &index, const int &v) {
#if S_DATA
atomicMin(&buffer[index].w,v);
#else
atomicMin(w_buffer+index,v);
#endif
}
#if S_DATA
#define sX(index,v) buffer[index].x = v
#define sY(index,v) buffer[index].y = v
#define sW(index,v) buffer[index].w = v
#define gX(index) buffer[index].x
#define gY(index) buffer[index].y
#define gW(index) buffer[index].w
#else
#define sX(index,v) x_buffer[index] = v
#define sY(index,v) y_buffer[index] = v
#define sW(index,v) w_buffer[index] = v
#define gX(index) x_buffer[index]
#define gY(index) y_buffer[index]
#define gW(index) w_buffer[index]
#endif
//
// Description :
// Give two points returns the distance between them
//
// @x1 - X value of the first point
// @y1 - Y value of the first point
// @x2 - X value of the second point
// @y2 - Y value of the second point
//
// @return - Returns the distance between the two points given
static __device__ inline float
dist(float x1, float y1, float x2, float y2) {
float x = x1-x2;
float y = y1-y2; y *= y;
return __float2int_rn(sqrtf(x*x + y));
}
//
// POSSIBLE IDEA :
// We could reduce the atomicAdd by letting each thread do their own work.
// But when they run out then try to grab other blocks remaining work.
//
// Description :
// Returns a unique integer value with the initial value being 0
//
// @return - Returns the next unique integer
static __device__ inline int
nextInt() {
if(threadIdx.x==0) {
sW(0,atomicAdd(&restart_d,1));
}__syncthreads();
return gW(0);
}
//
// Description :
// Allocates and initializes my global memory and shared memory.
//
// @pos - An array that needs to be initialized and will hold our path points
// @weight - An array that needs to be initialized and will hold our edge weights
// @cities - The amount of points in our graph
//
// @return - Returns true if initialization was successful, false otherwise.
template <int TileSize>
static inline __device__ bool
initMemory(const Data* &pos_d, Data* &pos, int * &weight, const int cities) {
__shared__ Data *d;
__shared__ int *w;
// Allocate my global memory
if(threadIdx.x == 0 ) {
d = new Data[cities + 1];
if(d != NULL) {
w = new int[cities];
if(w == NULL) {
delete[] d;
d = NULL;
}
}
}__syncthreads();
if(d == NULL) {
return false;
}
// Save new memory locations
pos = d;
weight = w;
for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i];
__syncthreads();
#if S_DATA == 0
// Initialize shared memory
x_buffer = (float*)buffer;
y_buffer = (float*)(buffer + sizeof(float) * TileSize);
w_buffer = (int*)(buffer + 2 * sizeof(float) * TileSize);
#endif
return true;
}
//
// Description :
// Each thread gives some integer value, then the "best" of them is returned.
//
// @t_val - The number that the thread submits as a candidate for the maximum value
// @cities - The number of cities.
//
// @return - The best value of t_val seen from all threads
template <int Reductions,ThreadBufferStatus Status, int TileSize>
static inline __device__ int
maximum(int t_val, const int cities) {
int upper = min(blockDim.x,min(TileSize,cities));
if(Status == MORE_THREADS_THAN_BUFFER) {
int Index = threadIdx.x % TileSize;
w_buffer[Index] = t_val;
__syncthreads();
for(int i = 0 ; i <= (blockDim.x/TileSize); ++i ) {
w_buffer[Index] = t_val = min(t_val,w_buffer[Index]);
}
}else {
w_buffer[threadIdx.x] = t_val;
}__syncthreads();
// 1024
if (TileSize > 512) {
int offset = (upper + 1) / 2; // 200
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 512
if (TileSize > 256) {
int offset = (upper + 1) / 2; // 100
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 256
if (TileSize > 128) {
int offset = (upper + 1) / 2; // 50
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 128
if (TileSize > 64) {
int offset = (upper + 1) / 2; // 25
if( threadIdx.x < offset) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]);
}__syncthreads();
upper = offset;
}
// 64 and down
if(threadIdx.x < 32) {
if(TileSize > 32) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+(upper+1)/2]);
}
if(threadIdx.x < 16) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+16]);
}
if(threadIdx.x < 8) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+8]);
}
if(threadIdx.x < 4) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+4]);
}
if(threadIdx.x < 2) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+2]);
}
if(threadIdx.x < 1) {
w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x+1]);
}
}__syncthreads();
return w_buffer[0];
}
//
// Description :
// After we find the best four positions to reconnect, we need to
// reverse the path between them.
//
// @start - The first position in the sub-path we have to swap with the end
// @end - The last position in the path we have to swap with the start
// @pos - The positions in our path
// @weights - The edge weights between points
static inline __device__ void
reverse(int start, int end, Data* &pos, int* &weight) {
while(start<end) {
int w = weight[start];
Data d = pos[start];
weight[start] = weight[end-1];
pos[start] = pos[end];
weight[end-1] = w;
pos[end] = d;
start += blockDim.x;
end -= blockDim.x;
}__syncthreads();
}
//
// Description :
// Perform a single iteration of Two-Opt.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @minchange - The current best change we can make
// @mini - The ith city in the path that is part of the swap
// @minj - The jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
template <ThreadBufferStatus Status,int TileSize>
static __device__ void
singleIter(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int cities) {
for (int ii = 0; ii < cities - 2; ii += blockDim.x) {
int i = ii + threadIdx.x;
float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1;
if (i < cities - 2) {
minchange -= weight[i];
pxi0 = pos[i].x;
pyi0 = pos[i].y;
pxi1 = pos[i+1].x;
pyi1 = pos[i+1].y;
pxj1 = pos[0].x;
pyj1 = pos[0].y;
}
for (int jj = cities - 1; jj >= ii + 2; jj -= TileSize) {
int bound = jj - TileSize + 1;
if(Status==MORE_BUFFER_THAN_THREADS) {
for(int k = threadIdx.x; k < TileSize; k += blockDim.x) {
int index = k + bound;
if (index >= (ii + 2)) {
sX(k,pos[index].x);
sY(k,pos[index].y);
sW(k,weight[index]);
}
}
}else {
if(threadIdx.x < TileSize) {
int index = threadIdx.x + bound;
if (index >= (ii + 2)) {
sX(threadIdx.x,pos[index].x);
sY(threadIdx.x,pos[index].y);
sW(threadIdx.x,weight[index]);
}
}
}__syncthreads();
int lower = bound;
if (lower < i + 2) lower = i + 2;
for (int j = jj; j >= lower; j--) {
int jm = j - bound;
float pxj0 = gX(jm);
float pyj0 = gY(jm);
int change = gW(jm)
+ dist(pxi0,pyi0,pxj0,pyj0)
+ dist(pxi1,pyi1,pxj1,pyj1);
pxj1 = pxj0;
pyj1 = pyj0;
if (minchange > change) {
minchange = change;
mini = i;
minj = j;
}
}__syncthreads();
}
if (i < cities - 2) {
minchange += weight[i];
}
}
}
//
// Description :
// Perform the swaps to the edges i and j to decrease the total length of our
// path and update the weight and pos arrays appropriately.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @minchange - The current best change we can make
// @mini - The ith city in the path that is part of the swap
// @minj - The jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
template <int Reductions, ThreadBufferStatus Status, int TileSize>
static __device__ bool
update(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int cities) {
//__shared__ int winner;winner = blockDim.x;
if( maximum<Reductions,Status,TileSize>(minchange, cities) >= 0) return false;
if(minchange == gW(0)) {
sW(1,threadIdx.x);
}__syncthreads();
if(gW(1) == threadIdx.x) {
sW(2,mini);
sW(3,minj);
}__syncthreads();
mini = gW(2);
minj = gW(3);
// Fix path and weights
reverse(mini+1+threadIdx.x,minj-threadIdx.x,pos,weight);
// Fix connecting points
weight[mini] = -dist(pos[mini].x,pos[mini].y,pos[mini+1].x,pos[mini+1].y);
weight[minj] = -dist(pos[minj].x,pos[minj].y,pos[minj+1].x,pos[minj+1].y);
__syncthreads();
return true;
}
//
// Description :
// Given a path we randomly permute it into a new path and then initialize
// the weights of the path.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @cities - The number of cities along the path (excluding the end point)
static __device__ inline void
permute(Data* &pos, int* &weight, const int cities) {
if (threadIdx.x == 0) { // serial permutation
curandState rndstate;
curand_init(blockIdx.x, 0, 0, &rndstate);
for (int i = 1; i < cities; i++) {
int j = curand(&rndstate) % (cities - 1) + 1;
Data d = pos[i];
pos[i] = pos[j];
pos[j] = d;
}
pos[cities] = pos[0];
}__syncthreads();
for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(pos[i].x, pos[i].y, pos[i+1].x, pos[i+1].y);
__syncthreads();
}
//
// Releases memory and saves results
//
// @pos - Pointer to allocated path memory
// @weight - Pointer to allocated edge weight memory
// @local_climbs - The number of climbs performed by this block
// @best_length - The best length this block found.
static __device__ void inline
cleanup(Data* &pos, int* &weight, int &local_climbs, int &best_length) {
if (threadIdx.x == 0) {
// Save data
atomicAdd(&climbs_d,local_climbs);
atomicMin(&best_d, best_length);
// Release memory
delete[] pos;
delete[] weight;
}
}
//
// Description :
// Perform iterative two-opt until there can be no more swaps to reduce
// the path length.
//
// @pos_d - The position of each point in the graph.
// @cities - The number of vertices in the graph
template <int Reductions,ThreadBufferStatus Status, int TileSize>
static __global__ __launch_bounds__(1024, 2) void
TwoOpt(const int Restarts, const Data *pos_d, const int cities) {
Data *pos;
int *weight;
int local_climbs = 0;
int best_length = INT_MAX;
if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) {
if(threadIdx.x == 0) {
printf("Memory initialization error for block %d\n", blockIdx.x);
}
return;
}
for(int r = nextInt() ; r < Restarts; r = nextInt()) {
int mini,minj,minchange;
permute(pos,weight,cities);
do {
++local_climbs; // Global statistics
minchange = mini = minj = 0; // Reset
singleIter<Status,TileSize>(pos, weight, minchange, mini, minj, cities);
} while (update<Reductions,Status,TileSize>(pos, weight, minchange, mini, minj, cities));
__shared__ int w; w = 0;
__syncthreads();
int term = 0;
for (int i = threadIdx.x; i < cities; i += blockDim.x) {
term += dist(pos[i].x, pos[i].y, pos[i+1].x, pos[i+1].y);
}
atomicAdd(&w,term);
__syncthreads();
if(threadIdx.x==0) {
if(w < best_length) {
best_length = w;
}
}
}
cleanup(pos, weight, local_climbs, best_length);
}
//
// Description :
// Checks to see if an error occurred with CUDA and, if so, prints out the
// message passed and the CUDA error, then quits the application.
//
// @msg - Message to print out if error occurs
static void
CudaTest(const char *msg) {
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
system("PAUSE");
exit(-1);
}
}
// Terrible (TODO: Turn into functions)
#define mallocOnGPU(addr, size) if (cudaSuccess != cudaMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory");
#define copyToGPU(to, from, size) if (cudaSuccess != cudaMemcpy(to, from, size, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed");
//
// Description :
// Read TSPLIB files into GPU memory. ATT and CEIL_2D edge weight types are
// not supported
//
// @fname - The name of the file to read the TSP data from
// @pos_d - Pointer to the pointer that will hold data on GPU
// and is modified here to be the address on the GPU
//
// @return - Returns the number of cities found
static int
readInput(const char *fname, Data **pos_d) {
int ch, cnt, in1, cities;
float in2, in3;
FILE *f;
Data *pos;
char str[256]; // potential for buffer overrun
f = fopen(fname, "rt");
if (f == NULL) {fprintf(stderr, "could not open file %s\n", fname); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
fscanf(f, "%s\n", str);
cities = atoi(str);
if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
fscanf(f, "%s\n", str);
if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
cnt = 0;
while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3)) {
pos[cnt].x = in2;
pos[cnt].y = in3;
++cnt;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
fscanf(f, "%s", str);
if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
mallocOnGPU(*pos_d, sizeof(Data) * cities);
copyToGPU(*pos_d, pos, sizeof(Data) * cities);
fclose(f);
delete[] pos;
return cities;
}
//
// Description :
// Given an enum value, return its string representation
//
// @status - The enum value to translate
//
// @return - The enums string representation in the source code
static const std::string
getName(const ThreadBufferStatus status) {
switch(status) {
case MORE_THREADS_THAN_BUFFER:
return std::string("MORE_THREADS_THAN_BUFFER");
case EQUAL_SIZE:
return std::string("EQUAL_SIZE");
case MORE_BUFFER_THAN_THREADS:
return std::string("MORE_BUFFER_THAN_THREADS");
};
return std::string("enum value not found.");
}
//
// Description :
// Given an integer returns the next multiple of 32 greater than or equal to it.
//
// @in - The integer to round to next multiple of 32
//
// @return - Returns the next multiple of 32 that is greater than or equal to in
static int
next32(int in) {
return ((in + 31) / 32 ) * 32;
}
//
// Description :
// How many reductions do we need to perform in order to make sure we have found
// our min/max/etc
//
// @return returns the number of reductions needed to propagate any value
static int
computeReductions(const int Cities, const int Threads, const int TileSize) {
int MaxData = min(Threads,min(TileSize,Cities));
if(MaxData>512) return 10;
if(MaxData>256) return 9;
if(MaxData>128) return 8;
if(MaxData>64) return 7;
if(MaxData>32) return 6;
return 5;
}
//
// Description :
// Calculates the maximum number of resident blocks that the card can hold
//
// @Threads - Number of threads that each block will have
// @Shared_Bytes - The amount of bytes each block will allocate
//
// @return - Returns the number of blocks the card can have resident
static int
getMaxBlocks(const int Shared_Bytes, const int Threads) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props,0);
if(props.major < 3) {
const int Max_Shared = 16384;
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
return props.multiProcessorCount * min(8,min(Block_Shared_Limit,(int)(2048/Threads)));
}else if(props.major < 5) {
const int Max_Shared = 32768;
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
return props.multiProcessorCount * min(16,min(Block_Shared_Limit,(int)(2048/Threads)));
}else {
const int Max_Shared = 65536;
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
return props.multiProcessorCount * min(32,min(Block_Shared_Limit,(int)(2048/Threads)));
}
}
//
// private : Handle ThreadBufferStatus kernel selection
//
template <int Reductions,int TileSize>
static float
_wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) {
float gpuExecutionTime;
// A quick way to template out some checks. If more threads than buffer then I have to atomicMin to a single slot.
const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? MORE_BUFFER_THAN_THREADS : EQUAL_SIZE;
// Amount of shared memory in Bytes
#if S_DATA
const int Shared_Bytes = sizeof(S_Data) * TileSize;
#else
const int Shared_Bytes = (sizeof(int) + 2 * sizeof(float)) * TileSize;
#endif
// Calculate number of maximum number of resident blocks allowed on the card
const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes,Threads));
// Output runtime configuration
std::cout << "Blocks = " << Blocks
<< ", Threads = " << Threads
<< ", TileSize = " << TileSize
<< ", Status = " << getName(Status)
<< ", Reductions = " << Reductions
<< ", Shared Bytes = " << Shared_Bytes << std::endl;
cudaEvent_t begin,end;
cudaEventCreate(&begin);
cudaEventCreate(&end);
switch(Status) {
case MORE_THREADS_THAN_BUFFER:
cudaEventRecord(begin,0);
TwoOpt<Reductions,MORE_THREADS_THAN_BUFFER,TileSize><<<Blocks,Threads,Shared_Bytes>>>(Restarts,Pos_d,Cities);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
break;
case EQUAL_SIZE:
cudaEventRecord(begin,0);
TwoOpt<Reductions,EQUAL_SIZE,TileSize><<<Blocks,Threads,Shared_Bytes>>>(Restarts,Pos_d,Cities);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
break;
case MORE_BUFFER_THAN_THREADS:
cudaEventRecord(begin,0);
TwoOpt<Reductions,MORE_BUFFER_THAN_THREADS,TileSize><<<Blocks,Threads,Shared_Bytes>>>(Restarts,Pos_d,Cities);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
break;
};
cudaEventElapsedTime(&gpuExecutionTime,begin,end);
cudaEventDestroy(begin);
cudaEventDestroy(end);
return gpuExecutionTime;
}
//
// private : Handle Reduction kernel selection
//
template <int TileSize>
static float
_wrapReduction(const int Restarts, const int Threads, const Data *Pos, const int Cities) {
const int Reductions = computeReductions(Cities,Threads,TileSize);
switch(Reductions) {
case 10:
return _wrapStatus<10,TileSize>(Restarts, Threads, Pos, Cities);
case 9:
return _wrapStatus<9,TileSize>(Restarts, Threads, Pos, Cities);
case 8:
return _wrapStatus<8,TileSize>(Restarts, Threads, Pos, Cities);
case 7:
return _wrapStatus<7,TileSize>(Restarts, Threads, Pos, Cities);
case 6:
return _wrapStatus<6,TileSize>(Restarts, Threads, Pos, Cities);
default:
return _wrapStatus<5,TileSize>(Restarts, Threads, Pos, Cities);
}
}
//
// Description :
// Given these parameters we construct and start a CUDA kernel.
//
// @Cities - Number of cities or nodes in the graph
// @Pos - Position data of graph nodes.
// @Restarts - How many different random permutations of the input cities should be tried
// @Threads - The number of threads each block should have
// @TileSize - The shared buffer size for our sliding tile.
//
// @return - Returns the duration of the kernel in milliseconds.
static float
RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) {
switch(TileSize) {
case 32:
return _wrapReduction<32>(Restarts,Threads,Pos,Cities);
case 64:
return _wrapReduction<64>(Restarts,Threads,Pos,Cities);
case 96:
return _wrapReduction<96>(Restarts,Threads,Pos,Cities);
case 128:
return _wrapReduction<128>(Restarts,Threads,Pos,Cities);
case 160:
return _wrapReduction<160>(Restarts,Threads,Pos,Cities);
case 192:
return _wrapReduction<192>(Restarts,Threads,Pos,Cities);
case 224:
return _wrapReduction<224>(Restarts,Threads,Pos,Cities);
case 256:
return _wrapReduction<256>(Restarts,Threads,Pos,Cities);
case 288:
return _wrapReduction<288>(Restarts,Threads,Pos,Cities);
case 320:
return _wrapReduction<320>(Restarts,Threads,Pos,Cities);
case 352:
return _wrapReduction<352>(Restarts,Threads,Pos,Cities);
case 384:
return _wrapReduction<384>(Restarts,Threads,Pos,Cities);
case 416:
return _wrapReduction<416>(Restarts,Threads,Pos,Cities);
case 448:
return _wrapReduction<448>(Restarts,Threads,Pos,Cities);
case 480:
return _wrapReduction<480>(Restarts,Threads,Pos,Cities);
case 512:
return _wrapReduction<512>(Restarts,Threads,Pos,Cities);
case 544:
return _wrapReduction<544>(Restarts,Threads,Pos,Cities);
case 576:
return _wrapReduction<576>(Restarts,Threads,Pos,Cities);
case 608:
return _wrapReduction<608>(Restarts,Threads,Pos,Cities);
case 640:
return _wrapReduction<640>(Restarts,Threads,Pos,Cities);
case 672:
return _wrapReduction<672>(Restarts,Threads,Pos,Cities);
case 704:
return _wrapReduction<704>(Restarts,Threads,Pos,Cities);
case 736:
return _wrapReduction<736>(Restarts,Threads,Pos,Cities);
case 768:
return _wrapReduction<768>(Restarts,Threads,Pos,Cities);
case 800:
return _wrapReduction<800>(Restarts,Threads,Pos,Cities);
case 832:
return _wrapReduction<832>(Restarts,Threads,Pos,Cities);
case 864:
return _wrapReduction<864>(Restarts,Threads,Pos,Cities);
case 896:
return _wrapReduction<896>(Restarts,Threads,Pos,Cities);
case 928:
return _wrapReduction<928>(Restarts,Threads,Pos,Cities);
case 960:
return _wrapReduction<960>(Restarts,Threads,Pos,Cities);
case 992:
return _wrapReduction<992>(Restarts,Threads,Pos,Cities);
case 1024:
return _wrapReduction<1024>(Restarts,Threads,Pos,Cities);
default:
std::cout << "Invalid TileSize = " << TileSize << std::endl;
exit(-1);
};
return -1;
}
//
// Description :
// Main entry point to our iterative Two-Opt solver.
// Options are ./<name> problem_file restarts <threads> <buffer_size>
//
// @argc - Number of command line parameters (including program name)
// @argv - Holds command line arguments
//
// @return - Returns 0 if success, otherwise failure.
int
main(int argc, char *argv[]) {
printf("2-opt TSP CUDA GPU code v2.1 [Kepler]\n");
printf("Copyright (c) 2014, Texas State University. All rights reserved.\n");
if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);}
const int Restarts = atoi(argv[2]);
if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);}
Data *pos_d;
const int Cities = readInput(argv[1], &pos_d);
printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]);
const int Threads = min(1024,(argc >= 4) ? next32(atoi(argv[3])) : next32(Cities));
const int TileSize = min(1024,(argc >= 5) ? next32(atoi(argv[4])) : Threads);
const float GpuExecutionTime = RunKernel(Cities,pos_d,Restarts,Threads,TileSize);
int hours = (int)(GpuExecutionTime / (3600.0f * 1000.0f));
int seconds = (int)(GpuExecutionTime/1000) % 60;
int minutes = (int)(GpuExecutionTime/1000) / 60;
long long moves = 1LL * climbs_d * (Cities - 2) * (Cities - 1) / 2;
std::cout << moves * 0.000001 / GpuExecutionTime << "Gmoves/s" << std::endl;
std::cout << "best found tour length = " << best_d << std::endl;
std::cout << "Total Time : " << GpuExecutionTime / 1000.0f << "s" << std::endl;
std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(GpuExecutionTime) % 1000 << std::endl;
cudaFree(pos_d);
cudaDeviceReset();
return 0;
}
|
9e68c213cdd35659c4aa6537ee72a2d3ca617dea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define __MAKEMORE_COLONEL_CU__ 1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include "colonel.hh"
namespace makemore {
int kdev = 0;
int kbs = 256;
void setkdev(int i) {
assert(i >= 0);
assert(i <= kndevs());
if (i > 0)
assert(0 == hipSetDevice(i - 1));
kdev = i;
}
void setkbs(int i) {
assert(i > 0);
kbs = i;
}
int kndevs() {
int ndevs = 0;
hipGetDeviceCount(&ndevs);
assert(ndevs >= 0);
return (1 + ndevs);
}
#undef syncthreads
#define syncthreads() __syncthreads()
#undef DEFN_KERNEL
#define DEFN_KERNEL(f, args...) \
__global__ void _gpu_ ## f(long __n, args)
#undef PREF_KERNEL
#define PREF_KERNEL \
long i = blockIdx.x * blockDim.x + threadIdx.x; \
if (i >= __n) \
return;
#undef PRE
#define PRE(x) _gpu_ ## x
#include "colonel-core.inc"
#undef syncthreads
#define syncthreads() assert(!"no syncthreads in cpu mode")
#undef __device__
#define __device__
#undef PRE
#define PRE(x) _cpu_ ## x
#undef DEFN_KERNEL
#define DEFN_KERNEL(f, args...) \
void _cpu_ ## f (long i, long __n, args)
#undef PREF_KERNEL
#define PREF_KERNEL \
if (i >= __n) \
return;
#include "colonel-core.inc"
#define CALL_KERNEL(f, _n, args...) do { \
long __n = (_n); \
if (kdev) { \
int __bs = kbs, __gs = ((__n + __bs - 1) / __bs); \
hipLaunchKernelGGL(_gpu_ ## f, dim3(__gs), dim3(__bs), 0, 0, __n, args); \
} else { \
for (long __i = __n - 1; __i >= 0; --__i) { \
_cpu_ ## f (__i, __n, args); \
} \
} \
} while (0);
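// Usage sketch (illustration only; "axpy" is a hypothetical kernel name, not
// something defined in this project): kernels are written once with
// DEFN_KERNEL/PREF_KERNEL inside colonel-core.inc, which is included twice
// above to generate both the _gpu_ and _cpu_ variants, and CALL_KERNEL then
// dispatches on kdev at runtime:
//
//   DEFN_KERNEL(axpy, double a, const double *x, double *y) {
//     PREF_KERNEL
//     y[i] += a * x[i];
//   }
//
//   void axpy(double a, const double *x, double *y, long n) {
//     CALL_KERNEL(axpy, n, a, x, y);
//   }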
void enkv(const void *a, unsigned int n, void *da) {
if (kdev) {
::hipMemcpy(da, a, n, hipMemcpyHostToDevice);
} else {
::memcpy(da, a, n);
}
}
void dekv(const void *da, unsigned int n, void *a) {
if (kdev)
::hipMemcpy(a, da, n, hipMemcpyDeviceToHost);
else
::memcpy(a, da, n);
}
void kmakev(void **dp, unsigned int n) {
if (kdev) {
void *d = NULL;
// assert(n > 0);
int ret = ::hipMalloc((void **)&d, n);
// assert(d != NULL);
assert(ret == 0);
*dp = d;
} else {
*dp = (void *)(new char[n]);
assert(*dp);
}
}
void kfreev(void *x) {
if (kdev)
::hipFree(x);
else
delete[] ((char *)x);
}
void kzerov(void *x, unsigned int n) {
if (kdev)
::hipMemset((void *)x, 0, n);
else
::memset(x, 0, n);
}
void kfill(double *x, unsigned int n, double v) {
if (kdev) {
double *y = new double[n];
for (unsigned int i = 0; i < n; ++i)
y[i] = v;
enk(y, n, x);
delete[] y;
} else {
for (unsigned int i = 0; i < n; ++i)
x[i] = v;
}
}
void kcopyv(const void *x, unsigned int n, void *y) {
if (kdev)
::hipMemcpy(y, x, n, hipMemcpyDeviceToDevice);
else
::memcpy(y, x, n);
}
#include "colonel-common.inc"
}
| 9e68c213cdd35659c4aa6537ee72a2d3ca617dea.cu | #define __MAKEMORE_COLONEL_CU__ 1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include "colonel.hh"
namespace makemore {
int kdev = 0;
int kbs = 256;
void setkdev(int i) {
assert(i >= 0);
assert(i <= kndevs());
if (i > 0)
assert(0 == cudaSetDevice(i - 1));
kdev = i;
}
void setkbs(int i) {
assert(i > 0);
kbs = i;
}
int kndevs() {
int ndevs = 0;
cudaGetDeviceCount(&ndevs);
assert(ndevs >= 0);
return (1 + ndevs);
}
#undef syncthreads
#define syncthreads() __syncthreads()
#undef DEFN_KERNEL
#define DEFN_KERNEL(f, args...) \
__global__ void _gpu_ ## f(long __n, args)
#undef PREF_KERNEL
#define PREF_KERNEL \
long i = blockIdx.x * blockDim.x + threadIdx.x; \
if (i >= __n) \
return;
#undef PRE
#define PRE(x) _gpu_ ## x
#include "colonel-core.inc"
#undef syncthreads
#define syncthreads() assert(!"no syncthreads in cpu mode")
#undef __device__
#define __device__
#undef PRE
#define PRE(x) _cpu_ ## x
#undef DEFN_KERNEL
#define DEFN_KERNEL(f, args...) \
void _cpu_ ## f (long i, long __n, args)
#undef PREF_KERNEL
#define PREF_KERNEL \
if (i >= __n) \
return;
#include "colonel-core.inc"
#define CALL_KERNEL(f, _n, args...) do { \
long __n = (_n); \
if (kdev) { \
int __bs = kbs, __gs = ((__n + __bs - 1) / __bs); \
_gpu_ ## f <<<__gs, __bs>>>(__n, args); \
} else { \
for (long __i = __n - 1; __i >= 0; --__i) { \
_cpu_ ## f (__i, __n, args); \
} \
} \
} while (0);
void enkv(const void *a, unsigned int n, void *da) {
if (kdev) {
::cudaMemcpy(da, a, n, cudaMemcpyHostToDevice);
} else {
::memcpy(da, a, n);
}
}
void dekv(const void *da, unsigned int n, void *a) {
if (kdev)
::cudaMemcpy(a, da, n, cudaMemcpyDeviceToHost);
else
::memcpy(a, da, n);
}
void kmakev(void **dp, unsigned int n) {
if (kdev) {
void *d = NULL;
// assert(n > 0);
int ret = ::cudaMalloc((void **)&d, n);
// assert(d != NULL);
assert(ret == 0);
*dp = d;
} else {
*dp = (void *)(new char[n]);
assert(*dp);
}
}
void kfreev(void *x) {
if (kdev)
::cudaFree(x);
else
delete[] ((char *)x);
}
void kzerov(void *x, unsigned int n) {
if (kdev)
::cudaMemset((void *)x, 0, n);
else
::memset(x, 0, n);
}
void kfill(double *x, unsigned int n, double v) {
if (kdev) {
double *y = new double[n];
for (unsigned int i = 0; i < n; ++i)
y[i] = v;
enk(y, n, x);
delete[] y;
} else {
for (unsigned int i = 0; i < n; ++i)
x[i] = v;
}
}
void kcopyv(const void *x, unsigned int n, void *y) {
if (kdev)
::cudaMemcpy(y, x, n, cudaMemcpyDeviceToDevice);
else
::memcpy(y, x, n);
}
#include "colonel-common.inc"
}
|
f61cd534030faeb83ee14574515e318c618d2491.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/loss_kernel_util.h"
namespace oneflow {
namespace user_op {
namespace {
using namespace loss;
template<typename T>
__global__ void ComputeSmoothL1Out(int64_t elem_cnt, const T* input, const T* target, T* out,
const float beta) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T abs_diff = abs(input[i] - target[i]);
if (abs_diff < beta) {
out[i] = 0.5 * abs_diff * abs_diff / beta;
} else {
out[i] = abs_diff - 0.5 * beta;
}
}
}
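// For reference, the piecewise loss evaluated above (beta > 0 assumed), with
// d = input[i] - target[i]:
//   smooth_l1(d) = 0.5 * d * d / beta   if |d| < beta
//                = |d| - 0.5 * beta     otherwise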
template<>
__global__ void ComputeSmoothL1Out(int64_t elem_cnt, const half* input, const half* target,
half* out, const float beta) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
const half half_zero = __float2half(0.0);
const half half_one = __float2half(0.5);
const half half_beta = __float2half(beta);
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const half diff = __hsub(input[i], target[i]);
const half abs_diff = __hlt(diff, half_zero) ? __hneg(diff) : diff;
if (__hlt(abs_diff, half_beta)) {
out[i] = __hmul(__hmul(half_one, abs_diff), __hdiv(abs_diff, half_beta));
} else {
out[i] = __hsub(abs_diff, __hmul(half_one, half_beta));
}
}
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
template<typename T>
__global__ void ComputeSmoothL1GradOut(int64_t elem_cnt, const T* input, const T* target,
const T* dy, T* dx, const ReductionType reduction_type,
const float beta) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T diff = input[i] - target[i];
const T abs_diff = abs(diff);
if (abs_diff < beta) {
dx[i] = diff / beta;
} else {
dx[i] = (diff > GetZeroVal<T>()) - (diff < GetZeroVal<T>());
}
const T dy_val = reduction_type == ReductionType::kNone ? dy[i] : *dy;
dx[i] = dx[i] * dy_val;
if (reduction_type == ReductionType::kMean) { dx[i] /= elem_cnt; };
}
}
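// For reference, the derivative applied above before chaining with dy (and the
// optional 1/elem_cnt factor for mean reduction), with d = input[i] - target[i]:
//   d/dd smooth_l1(d) = d / beta   if |d| < beta
//                     = sign(d)    otherwise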
template<>
__global__ void ComputeSmoothL1GradOut(int64_t elem_cnt, const half* input, const half* target,
const half* dy, half* dx, const ReductionType reduction_type,
const float beta) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
const half half_zero = __float2half(0.0);
const half half_one = __float2half(1.0);
const half half_beta = __float2half(beta);
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const half diff = __hsub(input[i], target[i]);
const half abs_diff = __hlt(diff, half_zero) ? __hneg(diff) : diff;
if (__hlt(abs_diff, half_beta)) {
dx[i] = __hdiv(diff, half_beta);
} else {
const half left = __hgt(diff, half_zero) ? half_one : half_zero;
const half right = __hlt(diff, half_zero) ? half_one : half_zero;
dx[i] = __hsub(left, right);
}
const half dy_val = reduction_type == ReductionType::kNone ? dy[i] : *dy;
dx[i] = __hmul(dx[i], dy_val);
if (reduction_type == ReductionType::kMean) {
dx[i] = __hdiv(dx[i], __float2half(static_cast<float>(elem_cnt)));
};
}
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
template<typename T>
class SmoothL1LossKernel : public SimpleLossKernel<DeviceType::kGPU, T, SmoothL1LossKernel<T>> {
public:
void ComputeOut(user_op::KernelComputeContext* ctx, int64_t elem_cnt, const T* input,
const T* target, T* out) const {
const float beta = ctx->Attr<float>("beta");
hipLaunchKernelGGL(( ComputeSmoothL1Out), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(), elem_cnt, input, target, out, beta);
}
};
template<typename T>
class SmoothL1LossGradKernel
: public SimpleLossGradKernel<DeviceType::kGPU, T, SmoothL1LossGradKernel<T>> {
public:
void ComputeOut(user_op::KernelComputeContext* ctx, int64_t elem_cnt, const T* input,
const T* target, const T* dy, T* dx, const ReductionType reduction) const {
const float beta = ctx->Attr<float>("beta");
hipLaunchKernelGGL(( ComputeSmoothL1GradOut), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(), elem_cnt, input, target, dy, dx,
reduction, beta);
}
};
} // namespace
REGISTER_SIMPLE_LOSS_KERNEL_GPU("smooth_l1_loss", SmoothL1LossKernel)
REGISTER_SIMPLE_LOSS_GRAD_KERNEL_GPU("smooth_l1_loss_grad", SmoothL1LossGradKernel)
} // namespace user_op
} // namespace oneflow
| f61cd534030faeb83ee14574515e318c618d2491.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/loss_kernel_util.h"
namespace oneflow {
namespace user_op {
namespace {
using namespace loss;
template<typename T>
__global__ void ComputeSmoothL1Out(int64_t elem_cnt, const T* input, const T* target, T* out,
const float beta) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T abs_diff = abs(input[i] - target[i]);
if (abs_diff < beta) {
out[i] = 0.5 * abs_diff * abs_diff / beta;
} else {
out[i] = abs_diff - 0.5 * beta;
}
}
}
template<>
__global__ void ComputeSmoothL1Out(int64_t elem_cnt, const half* input, const half* target,
half* out, const float beta) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
const half half_zero = __float2half(0.0);
const half half_one = __float2half(0.5);
const half half_beta = __float2half(beta);
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const half diff = __hsub(input[i], target[i]);
const half abs_diff = __hlt(diff, half_zero) ? __hneg(diff) : diff;
if (__hlt(abs_diff, half_beta)) {
out[i] = __hmul(__hmul(half_one, abs_diff), __hdiv(abs_diff, half_beta));
} else {
out[i] = __hsub(abs_diff, __hmul(half_one, half_beta));
}
}
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
template<typename T>
__global__ void ComputeSmoothL1GradOut(int64_t elem_cnt, const T* input, const T* target,
const T* dy, T* dx, const ReductionType reduction_type,
const float beta) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T diff = input[i] - target[i];
const T abs_diff = abs(diff);
if (abs_diff < beta) {
dx[i] = diff / beta;
} else {
dx[i] = (diff > GetZeroVal<T>()) - (diff < GetZeroVal<T>());
}
const T dy_val = reduction_type == ReductionType::kNone ? dy[i] : *dy;
dx[i] = dx[i] * dy_val;
if (reduction_type == ReductionType::kMean) { dx[i] /= elem_cnt; };
}
}
template<>
__global__ void ComputeSmoothL1GradOut(int64_t elem_cnt, const half* input, const half* target,
const half* dy, half* dx, const ReductionType reduction_type,
const float beta) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
const half half_zero = __float2half(0.0);
const half half_one = __float2half(1.0);
const half half_beta = __float2half(beta);
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const half diff = __hsub(input[i], target[i]);
const half abs_diff = __hlt(diff, half_zero) ? __hneg(diff) : diff;
if (__hlt(abs_diff, half_beta)) {
dx[i] = __hdiv(diff, half_beta);
} else {
const half left = __hgt(diff, half_zero) ? half_one : half_zero;
const half right = __hlt(diff, half_zero) ? half_one : half_zero;
dx[i] = __hsub(left, right);
}
const half dy_val = reduction_type == ReductionType::kNone ? dy[i] : *dy;
dx[i] = __hmul(dx[i], dy_val);
if (reduction_type == ReductionType::kMean) {
dx[i] = __hdiv(dx[i], __float2half(static_cast<float>(elem_cnt)));
};
}
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
template<typename T>
class SmoothL1LossKernel : public SimpleLossKernel<DeviceType::kGPU, T, SmoothL1LossKernel<T>> {
public:
void ComputeOut(user_op::KernelComputeContext* ctx, int64_t elem_cnt, const T* input,
const T* target, T* out) const {
const float beta = ctx->Attr<float>("beta");
ComputeSmoothL1Out<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(elem_cnt, input, target, out, beta);
}
};
template<typename T>
class SmoothL1LossGradKernel
: public SimpleLossGradKernel<DeviceType::kGPU, T, SmoothL1LossGradKernel<T>> {
public:
void ComputeOut(user_op::KernelComputeContext* ctx, int64_t elem_cnt, const T* input,
const T* target, const T* dy, T* dx, const ReductionType reduction) const {
const float beta = ctx->Attr<float>("beta");
ComputeSmoothL1GradOut<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(elem_cnt, input, target, dy, dx,
reduction, beta);
}
};
} // namespace
REGISTER_SIMPLE_LOSS_KERNEL_GPU("smooth_l1_loss", SmoothL1LossKernel)
REGISTER_SIMPLE_LOSS_GRAD_KERNEL_GPU("smooth_l1_loss_grad", SmoothL1LossGradKernel)
} // namespace user_op
} // namespace oneflow
|
ab7f59d3b7aa37f85e954edea421272f3d54f2ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Fermat
*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <renderer.h>
#include <renderer_impl.h>
#include <pathtracer.h>
#include <rt.h>
#include <files.h>
#include <bpt.h>
#include <mlt.h>
#include <cmlt.h>
#include <pssmlt.h>
#include <rpt.h>
#include <psfpt.h>
#include <fermat_loader.h>
#include <pbrt_importer.h>
#include <mesh/MeshStorage.h>
#include <eaw.h>
#include <xbl.h>
#include <cugar/basic/cuda/arch.h>
#include <cugar/basic/cuda/timer.h>
#include <cugar/basic/primitives.h>
#include <cugar/basic/functors.h>
#include <cugar/basic/cuda/sort.h>
#include <cugar/basic/timer.h>
#include <cugar/image/tga.h>
#include <cugar/image/pfm.h>
#include <cugar/bsdf/ltc.h>
#include <buffers.h>
#include <vector>
namespace ltc_ggx
{
typedef float mat33[9];
#include <cugar/bsdf/ltc_ggx.inc>
};
void load_assimp(const char* filename, MeshStorage& out_mesh, const std::vector<std::string>& dirs, std::vector<std::string>& scene_dirs);
//------------------------------------------------------------------------------
__global__ void fill_n_kernel(const int n, uint32_t* pixels)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < n) pixels[idx] = idx;
}
void fill_n(const int n, Buffer<uint32_t>& pixels)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(n, blockSize.x));
hipLaunchKernelGGL(( fill_n_kernel) , dim3(gridSize), dim3(blockSize), 0, 0, n, pixels.ptr());
}
//------------------------------------------------------------------------------
__global__ void to_rgba_kernel(const RenderingContextView renderer, uint8* rgba)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
if (renderer.shading_mode == kShaded)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::COMPOSITED_C, idx);
//cugar::Vector4f c =
// renderer.fb(FBufferDesc::DIRECT_C, idx) +
// renderer.fb(FBufferDesc::DIFFUSE_C, idx) * renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
// renderer.fb(FBufferDesc::SPECULAR_C, idx) * renderer.fb(FBufferDesc::SPECULAR_A, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kFiltered)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::FILTERED_C, idx);
//cugar::Vector4f c =
// renderer.fb(FBufferDesc::DIRECT_C, idx) +
// renderer.fb(FBufferDesc::DIFFUSE_C, idx) * renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
// renderer.fb(FBufferDesc::SPECULAR_C, idx) * renderer.fb(FBufferDesc::SPECULAR_A, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kAlbedo)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
renderer.fb(FBufferDesc::SPECULAR_A, idx);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kDiffuseAlbedo)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kSpecularAlbedo)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::SPECULAR_A, idx);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kDiffuseColor)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_C, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kSpecularColor)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::SPECULAR_C, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kDirectLighting)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIRECT_C, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kVariance)
{
float c = renderer.fb(FBufferDesc::COMPOSITED_C, idx).w;
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + 1);
c = powf(c, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kUV)
{
cugar::Vector4f c = renderer.fb.gbuffer.uv(idx);
// visualize the ST interpolated texture coordinates
c.x = c.z;
c.y = c.w;
c.z = 0.5f;
c.w = 0.0f;
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kCharts)
{
const uint32 tri_id = renderer.fb.gbuffer.tri(idx);
// find the chart containing this triangle
const uint32 group_id = tri_id < renderer.mesh.num_triangles ?
cugar::upper_bound_index( tri_id, renderer.mesh.group_offsets, renderer.mesh.num_groups+1 ) : uint32(-1);
// visualize the chart index as a color
cugar::Vector4f c;
c.x = cugar::randfloat(0, group_id) * 0.5f + 0.5f;
c.y = cugar::randfloat(1, group_id) * 0.5f + 0.5f;
c.z = cugar::randfloat(2, group_id) * 0.5f + 0.5f;
c.w = 0.0f;
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kNormal)
{
cugar::Vector4f geo = renderer.fb.gbuffer.geo(idx);
cugar::Vector3f normal = GBufferView::unpack_normal(geo);
rgba[idx * 4 + 0] = uint8(fminf(normal.x * 128.0f + 128.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(normal.y * 128.0f + 128.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(normal.z * 128.0f + 128.0f, 255.0f));
rgba[idx * 4 + 3] = 0;
}
else if (renderer.shading_mode >= kAux0 && (renderer.shading_mode - kAux0 < renderer.fb.n_channels - FBufferDesc::NUM_CHANNELS))
{
const uint32 aux_channel = renderer.shading_mode - kAux0 + FBufferDesc::NUM_CHANNELS;
cugar::Vector4f c = renderer.fb(aux_channel, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
}
}
void to_rgba(const RenderingContextView renderer, uint8* rgba)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(renderer.res_x * renderer.res_y, blockSize.x));
hipLaunchKernelGGL(( to_rgba_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, renderer, rgba);
CUDA_CHECK(cugar::cuda::sync_and_check_error("to_rgba"));
}
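// Hedged note on the tonemapping used by most shading paths above: each channel
// is scaled by the exposure, compressed with the Reinhard-style curve c / (1 + c),
// gamma-corrected and quantized, roughly
//   out = (uint8) min(256 * pow(e*c / (1 + e*c), 1/gamma), 255)
// with e = renderer.exposure and gamma = renderer.gamma.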
//------------------------------------------------------------------------------
__global__ void multiply_frame_kernel(RenderingContextView renderer, const float scale)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
// before scaling, save out luminance data
renderer.fb(FBufferDesc::LUMINANCE, idx) = cugar::Vector4f(
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
renderer.fb(FBufferDesc::DIFFUSE_C, idx) *= scale;
renderer.fb(FBufferDesc::DIFFUSE_A, idx) *= scale;
renderer.fb(FBufferDesc::SPECULAR_C, idx) *= scale;
renderer.fb(FBufferDesc::SPECULAR_A, idx) *= scale;
renderer.fb(FBufferDesc::DIRECT_C, idx) *= scale;
renderer.fb(FBufferDesc::COMPOSITED_C, idx) *= scale;
}
}
//------------------------------------------------------------------------------
__global__ void clamp_frame_kernel(RenderingContextView renderer, const float max_value)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
renderer.fb(FBufferDesc::DIFFUSE_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)), max_value );
renderer.fb(FBufferDesc::SPECULAR_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)), max_value );
renderer.fb(FBufferDesc::DIRECT_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)), max_value );
renderer.fb(FBufferDesc::COMPOSITED_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)), max_value );
FERMAT_ASSERT(
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).x ) &&
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).y ) &&
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).z ) &&
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).w ) );
}
}
//------------------------------------------------------------------------------
__global__ void update_variances_kernel(RenderingContextView renderer, const uint32 n)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
// fetch the previous frame's luminances
const cugar::Vector4f old_lum = renderer.fb(FBufferDesc::LUMINANCE, idx);
// compute the new frame's luminances
const cugar::Vector4f new_lum = cugar::Vector4f(
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
// compute the change in variance (x(n) - avg(n-1))*(x(n) - avg(n)), which can be written as the sum of two terms:
// 1. n*avg(n) - (n-1)*avg(n-1) - avg(n-1) = n*(avg(n) - avg(n-1))
// 2. n*avg(n) - (n-1)*avg(n-1) - avg(n) = (n-1)*(avg(n) - avg(n-1))
const cugar::Vector4f delta_lum_1 = n * (new_lum - old_lum);
const cugar::Vector4f delta_lum_2 = (n - 1) * (new_lum - old_lum);
const cugar::Vector4f delta_var = (delta_lum_1 * delta_lum_2) / (n*n);
// add the variance deltas to the old variances (previously rescaled by (n-1)/n) stored in the alpha components of the respective channels
renderer.fb(FBufferDesc::DIRECT_C, idx).w += delta_var.x;
renderer.fb(FBufferDesc::DIFFUSE_C, idx).w += delta_var.y;
renderer.fb(FBufferDesc::SPECULAR_C, idx).w += delta_var.z;
renderer.fb(FBufferDesc::COMPOSITED_C, idx).w += delta_var.w;
}
}
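// Hedged note: with avg(n) = ((n-1)*avg(n-1) + x(n)) / n, the factors above follow
// from x(n) - avg(n-1) = n*(avg(n) - avg(n-1)) and x(n) - avg(n) = (n-1)*(avg(n) - avg(n-1)),
// i.e. delta_lum_1 * delta_lum_2 is the usual Welford increment written purely in
// terms of the running averages; the extra division by n*n appears to be this
// renderer's own normalization of the accumulated per-pixel estimate.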
//------------------------------------------------------------------------------
__global__ void filter_variance_kernel(const FBufferChannelView img, float* var, const uint32 FW)
{
const uint32 x = threadIdx.x + blockIdx.x*blockDim.x;
const uint32 y = threadIdx.y + blockIdx.y*blockDim.y;
if (x < img.res_x &&
y < img.res_y)
{
const int32 lx = x > FW ? x - FW : 0;
const int32 rx = x + FW < img.res_x ? x + FW : img.res_x - 1;
const int32 ly = y > FW ? y - FW : 0;
const int32 ry = y + FW < img.res_y ? y + FW : img.res_y - 1;
float variance = 0.0f;
for (int yy = ly; yy <= ry; yy++)
for (int xx = lx; xx <= rx; xx++)
variance += img(xx, yy).w;
variance /= (ry - ly + 1) * (rx - lx + 1);
var[x + y * img.res_x] = variance;
}
}
void filter_variance(const FBufferChannelView img, float* var, const uint32 FW = 1)
{
dim3 blockSize(32, 4);
dim3 gridSize(cugar::divide_ri(img.res_x, blockSize.x), cugar::divide_ri(img.res_y, blockSize.y));
hipLaunchKernelGGL(( filter_variance_kernel) , dim3(gridSize), dim3(blockSize), 0, 0, img, var, FW);
CUDA_CHECK(cugar::cuda::sync_and_check_error("filter_variance"));
}
//------------------------------------------------------------------------------
void RenderingContextImpl::multiply_frame(const float scale)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
hipLaunchKernelGGL(( multiply_frame_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, view(0), scale);
CUDA_CHECK( cugar::cuda::sync_and_check_error("multiply_frame") );
}
//------------------------------------------------------------------------------
void RenderingContextImpl::rescale_frame(const uint32 instance)
{
multiply_frame( float(instance)/float(instance+1) );
}
// clamp the output framebuffer to a given maximum
//
// \param max_value
void RenderingContextImpl::clamp_frame(const float max_value)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
hipLaunchKernelGGL(( clamp_frame_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, view(0), max_value);
CUDA_CHECK( cugar::cuda::sync_and_check_error("clamp_frame") );
}
//------------------------------------------------------------------------------
void RenderingContextImpl::update_variances(const uint32 instance)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
hipLaunchKernelGGL(( update_variances_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, view(0), instance + 1);
CUDA_CHECK( cugar::cuda::sync_and_check_error("update_variances") );
}
// load a plugin
//
uint32 RenderingContextImpl::load_plugin(const char* plugin_name)
{
typedef uint32 (__stdcall *register_plugin_function)(RenderingContext& renderer);
fprintf(stderr, " loading plugin \"%s\"... started\n", plugin_name);
m_plugins.push_back(DLL(plugin_name));
register_plugin_function plugin_entry_function = (register_plugin_function)m_plugins.front().get_proc_address("register_plugin");
if (!plugin_entry_function)
{
fprintf(stderr, "failed loading plugin entry function!\n");
throw cugar::runtime_error("failed loading plugin entry function");
}
fprintf(stderr, " loading plugin \"%s\"... done\n", plugin_name);
fprintf(stderr, " initializing plugin \"%s\"... started\n", plugin_name);
const uint32 r = plugin_entry_function( *m_this );
fprintf(stderr, " initializing plugin \"%s\"... done\n", plugin_name);
return r;
}
//------------------------------------------------------------------------------
// RenderingContext initialization
//
void RenderingContextImpl::init(int argc, char** argv)
{
const char* filename = NULL;
register_renderer("pt", &PathTracer::factory );
register_renderer("bpt", &BPT::factory );
register_renderer("cmlt", &CMLT::factory );
register_renderer("mlt", &MLT::factory );
register_renderer("pssmlt", &PSSMLT::factory );
register_renderer("rpt", &RPT::factory );
register_renderer("psfpt", &PSFPT::factory );
//register_renderer("hellopt", &HelloPT::factory );
m_renderer_type = kBPT;
m_exposure = 1.0f;
m_gamma = 2.2f;
m_res_x = 1600;
m_res_y = 900;
m_aspect = 0.0f;
m_shading_rate = 1.0f;
m_shading_mode = kShaded;
// set the directional light
m_light.dir = cugar::normalize(cugar::Vector3f(1.0f,-0.5f,1.0f));
m_light.color = cugar::Vector3f(22.0f,21.0f,18.0f)*4;
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-i") == 0)
filename = argv[++i];
else if (strcmp(argv[i], "-r") == 0 ||
strcmp(argv[i], "-res") == 0)
{
m_res_x = atoi(argv[++i]);
m_res_y = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-a") == 0 ||
strcmp(argv[i], "-aspect") == 0)
{
m_aspect = (float)atof(argv[++i]);
}
else if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
if (camera_file == NULL)
{
fprintf(stderr, "failed opening camera file %s\n", argv[i]);
exit(0);
}
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
}
else if (strcmp(argv[i], "-plugin") == 0)
{
m_renderer_type = load_plugin( argv[++i] );
m_renderer = m_renderer_factories[m_renderer_type]();
}
else if (argv[i][0] == '-')
{
for (uint32 r = 0; r < m_renderer_names.size(); ++r)
{
if (m_renderer_names[r] == argv[i]+1)
{
m_renderer_type = r;
m_renderer = m_renderer_factories[r]();
}
}
}
}
if (m_aspect == 0.0f)
m_aspect = float(m_res_x) / float(m_res_y);
if (filename == NULL)
{
fprintf(stderr, "options:\n");
fprintf(stderr, " -i scene.obj specify the input scene\n");
fprintf(stderr, " -r int int specify the resolution\n");
fprintf(stderr, " -a float specify the aspect ratio\n");
fprintf(stderr, " -c camera.txt specify a camera file\n");
fprintf(stderr, " -pt use the PT renderer\n");
fprintf(stderr, " -bpt use the BPT renderer\n");
fprintf(stderr, " -mlt use the MLT renderer\n");
fprintf(stderr, " -cmlt use the CMLT renderer\n");
fprintf(stderr, " -pssmlt use the PSSMLT renderer\n");
exit(0);
}
bool overwrite_camera = false;
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
if (camera_file == NULL)
{
fprintf(stderr, "failed opening camera file %s\n", argv[i]);
exit(0);
}
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
overwrite_camera = true;
}
}
m_rgba.alloc(m_res_x * m_res_y * 4);
m_var.alloc(m_res_x * m_res_y);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
fprintf(stderr, "cuda device: %s\n", prop.name);
fprintf(stderr, " SM version : %d.%d\n",
prop.major, prop.minor);
fprintf(stderr, " SM count : %d \n",
prop.multiProcessorCount);
fprintf(stderr, " SM clock : %d \n",
prop.clockRate);
fprintf(stderr, " mem clock : %d \n",
prop.memoryClockRate);
size_t free, total;
hipMemGetInfo(&free, &total);
fprintf(stderr, " memory : %.3f GB\n",
float(total) / (1024 * 1024 * 1024));
std::vector<unsigned int> devices(1);
devices[0] = 0;
hipSetDevice( devices[0] );
// make sure we do have a renderer
if (m_renderer == NULL)
m_renderer = PathTracer::factory();
const uint32 aux_channels = m_renderer->auxiliary_channel_count();
m_fb.set_channel_count(FBufferDesc::NUM_CHANNELS + aux_channels);
m_fb.set_channel(FBufferDesc::DIFFUSE_C, "diffuse_color");
m_fb.set_channel(FBufferDesc::DIFFUSE_A, "diffuse_albedo");
m_fb.set_channel(FBufferDesc::SPECULAR_C, "specular_color");
m_fb.set_channel(FBufferDesc::SPECULAR_A, "specular_albedo");
m_fb.set_channel(FBufferDesc::DIRECT_C, "direct_color");
m_fb.set_channel(FBufferDesc::COMPOSITED_C, "composited_color");
m_fb.set_channel(FBufferDesc::FILTERED_C, "filtered_color");
m_fb.set_channel(FBufferDesc::LUMINANCE, "luminance");
m_renderer->register_auxiliary_channels( m_fb, FBufferDesc::NUM_CHANNELS );
m_fb.resize(m_res_x, m_res_y);
m_fb_temp[0].resize(m_res_x, m_res_y);
m_fb_temp[1].resize(m_res_x, m_res_y);
m_fb_temp[2].resize(m_res_x, m_res_y);
m_fb_temp[3].resize(m_res_x, m_res_y);
#if 0
// pre-computer the samples buffer
m_samples.alloc(m_res_x * m_res_y);
{
DomainBuffer<RTP_BUFFER_TYPE_HOST, float2> samples(m_res_x * m_res_y);
cugar::MJSampler sampler;
sampler.sample(m_res_x, m_res_y, (cugar::Vector2f*)samples.ptr());
m_samples = samples;
}
#endif
// Load the glossy reflectance profile
{
fprintf(stderr, "initializing glossy reflectance profile... started\n");
DomainBuffer<HOST_BUFFER, float> glossy_reflectance;
const uint32 S = 32;
glossy_reflectance.alloc(S*S*S*S);
ScopedFile file("glossy_reflectance.dat", "rb");
if (!file)
{
fprintf(stderr, " error opening glossy_reflectance.dat\n");
exit(1);
}
if (fread(glossy_reflectance.ptr(), sizeof(float), S*S*S*S, file) != S*S*S*S)
{
fprintf(stderr, " error loading glossy_reflectance.dat\n");
exit(1);
}
m_glossy_reflectance = glossy_reflectance;
fprintf(stderr, "initializing glossy reflectance profile... done\n");
}
// Load the LTC coefficients
{
fprintf(stderr, "initializing LTC coefficients... started\n");
DomainBuffer<HOST_BUFFER, float4> ltc_M;
DomainBuffer<HOST_BUFFER, float4> ltc_Minv;
ltc_M.alloc(ltc_ggx::size * ltc_ggx::size);
ltc_Minv.alloc(ltc_ggx::size * ltc_ggx::size);
cugar::LTCBsdf::preprocess(ltc_ggx::size, (const cugar::Matrix3x3f*)ltc_ggx::tabM, ltc_M.ptr(), ltc_Minv.ptr());
m_ltc_size = ltc_ggx::size;
m_ltc_M = ltc_M;
m_ltc_Minv = ltc_Minv;
m_ltc_A.alloc(ltc_ggx::size * ltc_ggx::size);
m_ltc_A.copy_from(ltc_ggx::size * ltc_ggx::size, HOST_BUFFER, ltc_ggx::tabAmplitude);
fprintf(stderr, "initializing LTC coefficients... done\n");
}
fprintf(stderr, "loading mesh file %s... started\n", filename);
std::vector<std::string> scene_dirs;
{
scene_dirs.push_back(""); // always look in the current directory
char local_path[2048];
extract_path(filename, local_path);
scene_dirs.push_back(local_path);
}
// Create the Model object
//
try
{
std::vector<std::string> dirs = scene_dirs;
std::vector<Camera> cameras;
std::vector<DirectionalLight> dir_lights;
if (strlen(filename) > 3 && strcmp(filename+strlen(filename)-3, ".fa") == 0)
load_scene(filename, m_mesh, cameras, dir_lights, dirs, scene_dirs);
else if ((strlen(filename) > 4 && strcmp(filename+strlen(filename)-4, ".obj") == 0) ||
(strlen(filename) > 4 && strcmp(filename+strlen(filename)-4, ".ply") == 0))
loadModel(filename, m_mesh);
else if (strlen(filename) > 5 && strcmp(filename+strlen(filename)-5, ".pbrt") == 0)
{
pbrt::FermatImporter importer(filename, &m_mesh, &m_camera, &dir_lights, &scene_dirs);
pbrt::import(filename, &importer);
importer.finish();
// copy the film options
m_exposure = importer.m_film.exposure;
m_gamma = importer.m_film.gamma;
}
else
load_assimp(filename, m_mesh, dirs, scene_dirs);
// check whether we need to pick the loaded camera
if (cameras.size() && overwrite_camera == false)
m_camera = cameras[0];
// store directional lights on both host and device
m_dir_lights_h.alloc( dir_lights.size() );
m_dir_lights_h.copy_from( dir_lights.size(), HOST_BUFFER, &dir_lights.front() );
m_dir_lights_d = m_dir_lights_h;
// perform normal compression
m_mesh.compress_normals();
m_mesh.compress_tex();
#if UNIFIED_VERTEX_ATTRIBUTES
// unify vertex attributes
unify_vertex_attributes( m_mesh );
#endif
// apply material flags
apply_material_flags( m_mesh );
// compute the bbox
if (1)
{
cugar::Vector3f bmin(1.0e16f, 1.0e16f, 1.0e16f);
cugar::Vector3f bmax(-1.0e16f, -1.0e16f, -1.0e16f);
MeshView::vertex_type* v = reinterpret_cast<MeshView::vertex_type*>(m_mesh.getVertexData());
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
{
bmin = cugar::min(bmin, vertex_comp(v[i]));
bmax = cugar::max(bmax, vertex_comp(v[i]));
}
// print the bounding box
fprintf(stderr, " bbox[%f, %f, %f][%f, %f, %f]\n",
bmin[0], bmin[1], bmin[2],
bmax[0], bmax[1], bmax[2]);
}
}
catch (MeshException e)
{
fprintf(stderr, " error loading mesh file %s : %s\n", filename, e.what());
exit(1);
}
fprintf(stderr, "loading mesh file %s... done\n", filename);
fprintf(stderr, " triangles : %d\n", m_mesh.getNumTriangles());
fprintf(stderr, " vertices : %d\n", m_mesh.getNumVertices());
fprintf(stderr, " normals : %d\n", m_mesh.getNumNormals());
fprintf(stderr, " materials : %d\n", m_mesh.getNumMaterials());
fprintf(stderr, " groups : %d\n", m_mesh.getNumGroups());
{
// print the group names
for (int32 i = 0; i < m_mesh.getNumGroups(); ++i)
fprintf(stderr, " group[%d] : %s, %u triangles\n", i,
m_mesh.getGroupName(i).c_str(),
m_mesh.getGroupOffsets()[i + 1] - m_mesh.getGroupOffsets()[i]);
}
// load all textures
{
fprintf(stderr, "loading %u textures... started\n", (uint32)m_mesh.m_textures.size());
m_textures_h.resize( m_mesh.m_textures.size() );
m_textures_d.resize( m_mesh.m_textures.size() );
for (size_t i = 0; i < m_mesh.m_textures.size(); ++i)
{
m_textures_h[i] = HostMipMapStoragePtr(new MipMapStorage<HOST_BUFFER>());
m_textures_d[i] = DeviceMipMapStoragePtr(new MipMapStorage<CUDA_BUFFER>());
// try to load the texture
char local_path[2048];
extract_path(filename, local_path);
char texture_name[2048];
strcpy(texture_name, m_mesh.m_textures[i].c_str());
if (find_file(texture_name, scene_dirs))
{
if (strcmp(texture_name + strlen(texture_name) - 4, ".tga") == 0)
{
cugar::TGAHeader tga_header;
unsigned char* rgb = cugar::load_tga(texture_name, &tga_header);
if (rgb)
{
MipMapStorage<HOST_BUFFER>::TexturePtr texture_h(new TextureStorage<HOST_BUFFER>());
texture_h->resize(tga_header.width, tga_header.height);
float4* tex = texture_h->ptr();
for (uint32 p = 0; p < uint32(tga_header.width) * uint32(tga_header.height); ++p)
tex[p] = make_float4(
float(rgb[3 * p + 0]) / 255.0f,
float(rgb[3 * p + 1]) / 255.0f,
float(rgb[3 * p + 2]) / 255.0f,
0.0f);
// generate the mipmap for this texture
m_textures_h[i]->set(texture_h);
// and copy it to the device
*m_textures_d[i] = *m_textures_h[i];
delete[] rgb;
}
else
fprintf(stderr, "warning: unable to load texture %s\n", texture_name);
}
else if (strcmp(texture_name + strlen(texture_name) - 4, ".pfm") == 0)
{
uint32 width, height;
float* rgb = cugar::load_pfm(texture_name, &width, &height);
if (rgb)
{
MipMapStorage<HOST_BUFFER>::TexturePtr texture_h(new TextureStorage<HOST_BUFFER>());
texture_h->resize(width, height);
float4* tex = texture_h->ptr();
for (uint32 p = 0; p < width * height; ++p)
tex[p] = make_float4(
float(rgb[3 * p + 0]),
float(rgb[3 * p + 1]),
float(rgb[3 * p + 2]),
0.0f);
// generate the mipmap for this texture
m_textures_h[i]->set(texture_h);
// and copy it to the device
*m_textures_d[i] = *m_textures_h[i];
delete[] rgb;
}
else
fprintf(stderr, "warning: unable to load texture %s\n", texture_name);
}
else
fprintf(stderr, "warning: unsupported texture format %s\n", texture_name);
}
else
fprintf(stderr, "warning: unable to find texture %s\n", texture_name);
}
m_texture_views_h.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_h.size(); ++i)
m_texture_views_h.set(i, m_textures_h[i]->view());
m_texture_views_d.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_d.size(); ++i)
m_texture_views_d.set(i, m_textures_d[i]->view());
fprintf(stderr, "loading %u textures... done\n", (uint32)m_mesh.m_textures.size());
}
// checking materials
for (int32_t i = 0; i < m_mesh.getNumTriangles(); ++i)
{
const int m = m_mesh.getMaterialIndices()[i];
if (m < 0 || m >= m_mesh.getNumMaterials())
{
fprintf(stderr, "material[%u] : %u out of range\n", i, m);
exit(1);
}
}
#if 0
fprintf(stderr, "creating UV index... started\n");
{
// initialize a uv-bvh on the host
HostUVBvh uv_bvh;
build( &uv_bvh, m_mesh );
output_uv_tris( m_mesh );
// and copy it to the device
m_uv_bvh = uv_bvh;
}
fprintf(stderr, "creating UV index... done\n");
#endif
// copy to the device
m_mesh_d = m_mesh;
{
size_t mem_free, mem_tot;
hipSetDevice(0);
hipMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
fprintf(stderr, "creating RT index... started\n");
#if 1
m_rt_context = new RTContext();
m_rt_context->create_geometry(
m_mesh_d.getNumTriangles(),
m_mesh_d.getVertexIndices(),
m_mesh_d.getNumVertices(),
m_mesh_d.getVertexData(),
m_mesh_d.getNormalIndices(),
m_mesh_d.getNormalData(),
m_mesh_d.getTextureCoordinateIndices(),
m_mesh_d.getTextureCoordinateData(),
m_mesh_d.getMaterialIndices());
// setup the material buffer
m_rt_context->bind_buffer( "g_materials", m_mesh_d.getNumMaterials(), sizeof(MeshMaterial), m_mesh_d.m_materials.ptr(), RT_FORMAT_USER );
// setup texture buffers
//m_rt_context->bind_buffer( "g_textures", m_texture_views_d.count(), sizeof(MipMapView), m_texture_views_d.ptr(), RT_FORMAT_USER );
// perform a small test launch
//m_rt_context->launch(0,128);
#else
m_rt_context = NULL;
#endif
fprintf(stderr, "creating RT index... done\n");
const uint32 n_dimensions = 6 * 12;
const uint32 tiled_dim = 256;
fprintf(stderr, " initializing sampler: %u dimensions\n", n_dimensions);
m_sequence.setup(n_dimensions, tiled_dim);
fprintf(stderr, "initializing path sampler... started\n");
m_renderer->init(argc, argv, *m_this);
fprintf(stderr, "initializing path sampler... done\n");
{
size_t mem_free, mem_tot;
hipSetDevice(0);
hipMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
#if 0
cugar::host_vector<uint32_t> h_randoms(1024 * 1024);
for (uint32_t i = 0; i < 1024 * 1024; ++i)
h_randoms[i] = rand();
cugar::device_vector<uint32_t> d_randoms = h_randoms;
cugar::device_vector<uint32_t> d_vals = h_randoms;
cugar::device_vector<uint8_t> temp_storage;
cugar::radix_sort<cugar::device_tag>(1024 * 1024, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
for (uint32_t i = 0; i < 10; ++i)
{
d_randoms = h_randoms;
const uint32_t n_keys = (1u << (i + 1)) * 1024;
cugar::cuda::Timer timer;
timer.start();
cugar::radix_sort<cugar::device_tag>(n_keys, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
timer.stop();
fprintf(stderr, "%u K items : %.2fms\n", n_keys / 1024, timer.seconds() * 1000.0f);
}
#endif
}
void RenderingContextImpl::clear()
{
for (uint32_t c = 0; c < m_fb.channel_count(); ++c)
m_fb.channels[c].clear();
}
void RenderingContextImpl::update_model()
{
m_rt_context->create_geometry(
m_mesh_d.getNumTriangles(),
m_mesh_d.getVertexIndices(),
m_mesh_d.getNumVertices(),
m_mesh_d.getVertexData(),
m_mesh_d.getNormalIndices(),
m_mesh_d.getNormalData(),
m_mesh_d.getTextureCoordinateIndices(),
m_mesh_d.getTextureCoordinateData(),
m_mesh_d.getMaterialIndices());
// TODO: update m_mesh_lights if needed!
m_renderer->update_scene(*m_this);
// TODO: update the m_rt_context!
}
// register a new rendering interface type
//
uint32 RenderingContextImpl::register_renderer(const char* name, RendererFactoryFunction factory)
{
m_renderer_names.push_back( name );
m_renderer_factories.push_back( factory );
return uint32( m_renderer_factories.size() - 1 );
}
// RenderingContext display function
//
void RenderingContextImpl::render(const uint32 instance)
{
try
{
RenderingContextView renderer_view = view(instance);
// setup optix vars
m_rt_context->bind_var( "g_renderer", renderer_view );
// clear the primary Gbuffer
m_fb.gbuffer.clear();
//hipDeviceSynchronize();
m_renderer->render(instance, *m_this);
// apply filtering, if enabled
if (m_shading_mode == kFiltered)
filter( instance );
to_rgba(renderer_view, m_rgba.ptr());
}
catch (cugar::cuda_error& error)
{
fprintf(stderr, "caught cuda error: %s\n", error.what());
exit(0);
}
}
RenderingContextView RenderingContextImpl::view(const uint32 instance)
{
RenderingContextView renderer_view(
m_camera,
(uint32)m_dir_lights_d.count(),
m_dir_lights_d.ptr(),
m_mesh_d.view(),
m_mesh_lights.view(false),
m_mesh_lights.view(true),
m_texture_views_d.ptr(),
m_ltc_size,
m_ltc_M.ptr(),
m_ltc_Minv.ptr(),
m_ltc_A.ptr(),
m_glossy_reflectance.ptr(),
m_res_x,
m_res_y,
m_aspect,
m_exposure,
m_gamma,
m_shading_rate,
m_shading_mode,
m_fb.view(),
instance );
return renderer_view;
}
// compute the scene's bbox
//
cugar::Bbox3f RenderingContextImpl::compute_bbox()
{
MeshView mesh_view = m_mesh.view();
cugar::Bbox3f bbox;
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
bbox.insert( load_vertex( mesh_view, i ) );
return bbox;
}
void RenderingContextImpl::filter(const uint32 instance)
{
// clear the output filter
m_fb.channels[FBufferDesc::FILTERED_C] = m_fb.channels[FBufferDesc::DIRECT_C];
FBufferChannelView output = m_fb.channels[FBufferDesc::FILTERED_C].view();
cugar::Vector3f U, V, W;
camera_frame( m_camera, m_aspect, U, V, W );
#if 1
// setup some ping-pong buffers
FBufferChannelView pingpong[2];
pingpong[0] = m_fb_temp[0].view();
pingpong[1] = m_fb_temp[1].view();
EAWParams eaw_params;
eaw_params.phi_normal = /*sqrtf(float(instance + 1)) **/ 2.0f;
eaw_params.phi_position = /*sqrtf(float(instance + 1)) **/ 1.0f;
//eaw_params.phi_color = float(instance + 1) / 20.0f;
eaw_params.phi_color = float(instance*instance + 1) / 10000.0f;
eaw_params.E = m_camera.eye;
eaw_params.U = U;
eaw_params.V = V;
eaw_params.W = W;
const uint32 n_iterations = 7;
// filter the diffuse channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::DIFFUSE_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::DIFFUSE_A].view();
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
// filter the specular channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::SPECULAR_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::SPECULAR_A].view();
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
#elif 0
XBLParams xbl_params;
xbl_params.taps = 32;
xbl_params.phi_normal = 32.0f;
xbl_params.phi_position = 1.0f;
xbl_params.phi_color = 0.0f;
//xbl_params.phi_color = float(instance*instance + 1) / 10000.0f;
//eaw_params.phi_color = float(instance*instance + 1) / 10000.0f;
xbl_params.E = m_camera.eye;
xbl_params.U = U;
xbl_params.V = V;
xbl_params.W = W;
// filter the diffuse channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::DIFFUSE_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::DIFFUSE_A].view();
filter_variance(input, m_var.ptr(), 2);
XBL(
output, // destination
FilterOp(kFilterOpDemodulateInput | kFilterOpModulateOutput | kFilterOpAddMode),
weight, // weight
1.0e-4f, // min weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
xbl_params,
21u,
1u,
m_sequence.view());
}
// filter the specular channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::SPECULAR_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::SPECULAR_A].view();
filter_variance(input, m_var.ptr(), 2);
XBL(
output, // destination
FilterOp(kFilterOpDemodulateInput | kFilterOpModulateOutput | kFilterOpAddMode),
weight, // weight
1.0e-4f, // min weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
xbl_params,
21u,
1u,
m_sequence.view());
}
#endif
}
// constructor
//
RenderingContext::RenderingContext()
{
m_impl = new RenderingContextImpl( this );
}
// initialize the renderer
//
void RenderingContext::init(int argc, char** argv)
{
m_impl->init( argc, argv );
}
// render a frame
//
// \param instance the sequence instance / frame number in a progressive render
void RenderingContext::render(const uint32 instance)
{
m_impl->render( instance );
}
// clear all framebuffers
//
void RenderingContext::clear()
{
m_impl->clear();
}
// rescale the output framebuffer by a constant
//
void RenderingContext::multiply_frame(const float scale)
{
m_impl->multiply_frame( scale );
}
// rescale the output framebuffer by n/(n-1)
//
// \param instance the sequence instance / frame number in a progressive render, used for rescaling
void RenderingContext::rescale_frame(const uint32 instance)
{
m_impl->rescale_frame( instance );
}
// clamp the output framebuffer to a given maximum
//
// \param max_value
void RenderingContext::clamp_frame(const float max_value)
{
m_impl->clamp_frame( max_value );
}
// update the variance estimates
//
// \param instance the sequence instance / frame number in a progressive render, used for rescaling
void RenderingContext::update_variances(const uint32 instance)
{
m_impl->update_variances( instance );
}
// update the internal data-structures (e.g. BVHs) associated to the geometry
//
void RenderingContext::update_model()
{
m_impl->update_model();
}
// perform filtering
//
// \param instance the sequence instance / frame number in a progressive render
void RenderingContext::filter(const uint32 instance)
{
m_impl->filter( instance );
}
// return the current output resolution
//
uint2 RenderingContext::res() const { return m_impl->res(); }
// return a view of the renderer
//
RenderingContextView RenderingContext::view(const uint32 instance) { return m_impl->view( instance ); }
// return the camera
//
Camera& RenderingContext::get_camera() { return m_impl->get_camera(); }
// return the directional light count
//
uint32 RenderingContext::get_directional_light_count() const
{
return (uint32)m_impl->m_dir_lights_d.count();
}
// return the host-side directional lights
//
const DirectionalLight* RenderingContext::get_host_directional_lights() const
{
return m_impl->m_dir_lights_h.ptr();
}
// return the device-side directional lights
//
const DirectionalLight* RenderingContext::get_device_directional_lights() const
{
return m_impl->m_dir_lights_d.ptr();
}
// set the number of directional lights
//
void RenderingContext::set_directional_light_count(const uint32 count)
{
m_impl->m_dir_lights_h.alloc( count );
m_impl->m_dir_lights_d.alloc( count );
}
// set a directional light
//
void RenderingContext::set_directional_light(const uint32 i, const DirectionalLight& light)
{
m_impl->m_dir_lights_h.set( i, light );
m_impl->m_dir_lights_d.set( i, light );
}
// return the target resolution
//
uint2 RenderingContext::get_res() const { return m_impl->get_res(); }
// return the target aspect ratio
//
float RenderingContext::get_aspect_ratio() const { return m_impl->get_aspect_ratio(); }
// return the target exposure
//
void RenderingContext::set_aspect_ratio(const float v) { m_impl->m_aspect = v; }
// return the target exposure
//
float RenderingContext::get_exposure() const { return m_impl->get_exposure(); }
// set the target exposure
//
void RenderingContext::set_exposure(const float v) { m_impl->m_exposure = v; }
// return the target gamma
//
float RenderingContext::get_gamma() const { return m_impl->m_gamma; }
// set the target gamma
//
void RenderingContext::set_gamma(const float v) { m_impl->m_gamma = v; }
// return the shading mode
//
ShadingMode& RenderingContext::get_shading_mode() { return m_impl->m_shading_mode; }
// return the frame buffer
//
FBufferStorage& RenderingContext::get_frame_buffer() { return m_impl->m_fb; }
// return the frame buffer
//
uint8* RenderingContext::get_device_rgba_buffer() { return m_impl->m_rgba.ptr(); }
// return the number of textures
//
uint32 RenderingContext::get_texture_count() const { return uint32( m_impl->m_textures_h.size() ); }
// return the scene's host-side textures
//
RenderingContext::HostMipMapStoragePtr* RenderingContext::get_host_textures() { return &m_impl->m_textures_h.front(); }
// return the scene's device-side textures
//
RenderingContext::DeviceMipMapStoragePtr* RenderingContext::get_device_textures() { return &m_impl->m_textures_d.front(); }
// return the scene's host-side textures
//
MipMapView* RenderingContext::get_host_texture_views() { return m_impl->get_host_texture_views(); }
// return the scene's device-side textures
//
MipMapView* RenderingContext::get_device_texture_views() { return m_impl->get_device_texture_views(); }
// return the scene's host-side mesh
//
MeshStorage& RenderingContext::get_host_mesh() { return m_impl->get_host_mesh(); }
// return the scene's device-side mesh
//
DeviceMeshStorage& RenderingContext::get_device_mesh() { return m_impl->get_device_mesh(); }
// return the scene's device-side mesh emitters
//
MeshLightsStorage& RenderingContext::get_mesh_lights() { return m_impl->get_mesh_lights(); }
// return the ray tracing context
//
RTContext* RenderingContext::get_rt_context() const { return m_impl->get_rt_context(); }
// return the sampling sequence
//
TiledSequence& RenderingContext::get_sequence() { return m_impl->m_sequence; }
// return the renderer
//
RendererInterface* RenderingContext::get_renderer() const { return m_impl->get_renderer(); }
// register a new rendering interface type
//
uint32 RenderingContext::register_renderer(const char* name, RendererFactoryFunction factory)
{
return m_impl->register_renderer( name, factory );
}
// compute the scene's bbox
//
cugar::Bbox3f RenderingContext::compute_bbox() { return m_impl->compute_bbox(); }
| ab7f59d3b7aa37f85e954edea421272f3d54f2ee.cu | /*
* Fermat
*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <renderer.h>
#include <renderer_impl.h>
#include <pathtracer.h>
#include <rt.h>
#include <files.h>
#include <bpt.h>
#include <mlt.h>
#include <cmlt.h>
#include <pssmlt.h>
#include <rpt.h>
#include <psfpt.h>
#include <fermat_loader.h>
#include <pbrt_importer.h>
#include <mesh/MeshStorage.h>
#include <eaw.h>
#include <xbl.h>
#include <cugar/basic/cuda/arch.h>
#include <cugar/basic/cuda/timer.h>
#include <cugar/basic/primitives.h>
#include <cugar/basic/functors.h>
#include <cugar/basic/cuda/sort.h>
#include <cugar/basic/timer.h>
#include <cugar/image/tga.h>
#include <cugar/image/pfm.h>
#include <cugar/bsdf/ltc.h>
#include <buffers.h>
#include <vector>
namespace ltc_ggx
{
typedef float mat33[9];
#include <cugar/bsdf/ltc_ggx.inc>
};
void load_assimp(const char* filename, MeshStorage& out_mesh, const std::vector<std::string>& dirs, std::vector<std::string>& scene_dirs);
//------------------------------------------------------------------------------
__global__ void fill_n_kernel(const int n, uint32_t* pixels)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < n) pixels[idx] = idx;
}
void fill_n(const int n, Buffer<uint32_t>& pixels)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(n, blockSize.x));
fill_n_kernel << < gridSize, blockSize >> >(n, pixels.ptr());
}
//------------------------------------------------------------------------------
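// to_rgba_kernel: converts the selected framebuffer channel(s) to 8-bit RGBA for display.
// The color modes apply exposure, a simple c/(1+c) tone-mapping curve and gamma correction;
// the remaining modes visualize debug data (albedos, UVs, chart ids, normals, variance and
// auxiliary channels) more or less directly.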
__global__ void to_rgba_kernel(const RenderingContextView renderer, uint8* rgba)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
if (renderer.shading_mode == kShaded)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::COMPOSITED_C, idx);
//cugar::Vector4f c =
// renderer.fb(FBufferDesc::DIRECT_C, idx) +
// renderer.fb(FBufferDesc::DIFFUSE_C, idx) * renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
// renderer.fb(FBufferDesc::SPECULAR_C, idx) * renderer.fb(FBufferDesc::SPECULAR_A, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kFiltered)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::FILTERED_C, idx);
//cugar::Vector4f c =
// renderer.fb(FBufferDesc::DIRECT_C, idx) +
// renderer.fb(FBufferDesc::DIFFUSE_C, idx) * renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
// renderer.fb(FBufferDesc::SPECULAR_C, idx) * renderer.fb(FBufferDesc::SPECULAR_A, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kAlbedo)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
renderer.fb(FBufferDesc::SPECULAR_A, idx);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kDiffuseAlbedo)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kSpecularAlbedo)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::SPECULAR_A, idx);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kDiffuseColor)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_C, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kSpecularColor)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::SPECULAR_C, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kDirectLighting)
{
cugar::Vector4f c = renderer.fb(FBufferDesc::DIRECT_C, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kVariance)
{
float c = renderer.fb(FBufferDesc::COMPOSITED_C, idx).w;
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + 1);
c = powf(c, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kUV)
{
cugar::Vector4f c = renderer.fb.gbuffer.uv(idx);
// visualize the ST interpolated texture coordinates
c.x = c.z;
c.y = c.w;
c.z = 0.5f;
c.w = 0.0f;
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kCharts)
{
const uint32 tri_id = renderer.fb.gbuffer.tri(idx);
// find the chart containing this triangle
const uint32 group_id = tri_id < renderer.mesh.num_triangles ?
cugar::upper_bound_index( tri_id, renderer.mesh.group_offsets, renderer.mesh.num_groups+1 ) : uint32(-1);
// visualize the chart index as a color
cugar::Vector4f c;
c.x = cugar::randfloat(0, group_id) * 0.5f + 0.5f;
c.y = cugar::randfloat(1, group_id) * 0.5f + 0.5f;
c.z = cugar::randfloat(2, group_id) * 0.5f + 0.5f;
c.w = 0.0f;
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
else if (renderer.shading_mode == kNormal)
{
cugar::Vector4f geo = renderer.fb.gbuffer.geo(idx);
cugar::Vector3f normal = GBufferView::unpack_normal(geo);
rgba[idx * 4 + 0] = uint8(fminf(normal.x * 128.0f + 128.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(normal.y * 128.0f + 128.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(normal.z * 128.0f + 128.0f, 255.0f));
rgba[idx * 4 + 3] = 0;
}
else if (renderer.shading_mode >= kAux0 && (renderer.shading_mode - kAux0 < renderer.fb.n_channels - FBufferDesc::NUM_CHANNELS))
{
const uint32 aux_channel = renderer.shading_mode - kAux0 + FBufferDesc::NUM_CHANNELS;
cugar::Vector4f c = renderer.fb(aux_channel, idx);
c *= renderer.exposure; // Hardcoded Exposure Adjustment
c = c / (c + cugar::Vector4f(1));
c.x = powf(c.x, 1.0f / renderer.gamma);
c.y = powf(c.y, 1.0f / renderer.gamma);
c.z = powf(c.z, 1.0f / renderer.gamma);
c.w = powf(c.w, 1.0f / renderer.gamma);
rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
}
}
void to_rgba(const RenderingContextView renderer, uint8* rgba)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(renderer.res_x * renderer.res_y, blockSize.x));
to_rgba_kernel <<< gridSize, blockSize >>>(renderer, rgba);
CUDA_CHECK(cugar::cuda::sync_and_check_error("to_rgba"));
}
//------------------------------------------------------------------------------
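// multiply_frame_kernel: rescales all color channels by a constant (used for progressive
// averaging); before scaling it stores the per-channel max-component luminances in the
// LUMINANCE channel so that update_variances_kernel can later measure the change.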
__global__ void multiply_frame_kernel(RenderingContextView renderer, const float scale)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
// before scaling, save out luminance data
renderer.fb(FBufferDesc::LUMINANCE, idx) = cugar::Vector4f(
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
renderer.fb(FBufferDesc::DIFFUSE_C, idx) *= scale;
renderer.fb(FBufferDesc::DIFFUSE_A, idx) *= scale;
renderer.fb(FBufferDesc::SPECULAR_C, idx) *= scale;
renderer.fb(FBufferDesc::SPECULAR_A, idx) *= scale;
renderer.fb(FBufferDesc::DIRECT_C, idx) *= scale;
renderer.fb(FBufferDesc::COMPOSITED_C, idx) *= scale;
}
}
//------------------------------------------------------------------------------
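// clamp_frame_kernel: clamps every color channel to max_value (firefly suppression) and
// asserts that the composited channel contains only finite values.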
__global__ void clamp_frame_kernel(RenderingContextView renderer, const float max_value)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
renderer.fb(FBufferDesc::DIFFUSE_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)), max_value );
renderer.fb(FBufferDesc::SPECULAR_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)), max_value );
renderer.fb(FBufferDesc::DIRECT_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)), max_value );
renderer.fb(FBufferDesc::COMPOSITED_C, idx) = cugar::min( cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)), max_value );
FERMAT_ASSERT(
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).x ) &&
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).y ) &&
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).z ) &&
cugar::is_finite( renderer.fb(FBufferDesc::COMPOSITED_C, idx).w ) );
}
}
//------------------------------------------------------------------------------
__global__ void update_variances_kernel(RenderingContextView renderer, const uint32 n)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
// fetch the previous frame's luminances
const cugar::Vector4f old_lum = renderer.fb(FBufferDesc::LUMINANCE, idx);
// compute the new frame's luminances
const cugar::Vector4f new_lum = cugar::Vector4f(
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
// compute the change in variance (x(n) - avg(n-1))*(x(n) - avg(n)), a Welford-style online update;
// using x(n) = n*avg(n) - (n-1)*avg(n-1), the two factors can be written as:
// 1. x(n) - avg(n-1) = n*avg(n) - (n-1)*avg(n-1) - avg(n-1) = n*(avg(n) - avg(n-1))
// 2. x(n) - avg(n) = n*avg(n) - (n-1)*avg(n-1) - avg(n) = (n-1)*(avg(n) - avg(n-1))
const cugar::Vector4f delta_lum_1 = n * (new_lum - old_lum);
const cugar::Vector4f delta_lum_2 = (n - 1) * (new_lum - old_lum);
const cugar::Vector4f delta_var = (delta_lum_1 * delta_lum_2) / (n*n);
// add the variance deltas to the old variances (previously rescaled by (n-1)/n) stored in the alpha components of the respective channels
renderer.fb(FBufferDesc::DIRECT_C, idx).w += delta_var.x;
renderer.fb(FBufferDesc::DIFFUSE_C, idx).w += delta_var.y;
renderer.fb(FBufferDesc::SPECULAR_C, idx).w += delta_var.z;
renderer.fb(FBufferDesc::COMPOSITED_C, idx).w += delta_var.w;
}
}
//------------------------------------------------------------------------------
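// filter_variance_kernel: box-filters the variance stored in the .w component of a
// framebuffer channel over a (2*FW+1) x (2*FW+1) window, clamped at the image borders.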
__global__ void filter_variance_kernel(const FBufferChannelView img, float* var, const uint32 FW)
{
const uint32 x = threadIdx.x + blockIdx.x*blockDim.x;
const uint32 y = threadIdx.y + blockIdx.y*blockDim.y;
if (x < img.res_x &&
y < img.res_y)
{
const int32 lx = x > FW ? x - FW : 0;
const int32 rx = x + FW < img.res_x ? x + FW : img.res_x - 1;
const int32 ly = y > FW ? y - FW : 0;
const int32 ry = y + FW < img.res_y ? y + FW : img.res_y - 1;
float variance = 0.0f;
for (int yy = ly; yy <= ry; yy++)
for (int xx = lx; xx <= rx; xx++)
variance += img(xx, yy).w;
variance /= (ry - ly + 1) * (rx - lx + 1);
var[x + y * img.res_x] = variance;
}
}
void filter_variance(const FBufferChannelView img, float* var, const uint32 FW = 1)
{
dim3 blockSize(32, 4);
dim3 gridSize(cugar::divide_ri(img.res_x, blockSize.x), cugar::divide_ri(img.res_y, blockSize.y));
filter_variance_kernel << < gridSize, blockSize >> > (img, var, FW);
CUDA_CHECK(cugar::cuda::sync_and_check_error("filter_variance"));
}
//------------------------------------------------------------------------------
void RenderingContextImpl::multiply_frame(const float scale)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
multiply_frame_kernel <<< gridSize, blockSize >>>(view(0), scale);
CUDA_CHECK( cugar::cuda::sync_and_check_error("multiply_frame") );
}
//------------------------------------------------------------------------------
void RenderingContextImpl::rescale_frame(const uint32 instance)
{
multiply_frame( float(instance)/float(instance+1) );
}
// clamp the output framebuffer to a given maximum
//
// \param max_value
void RenderingContextImpl::clamp_frame(const float max_value)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
clamp_frame_kernel <<< gridSize, blockSize >>>(view(0), max_value);
CUDA_CHECK( cugar::cuda::sync_and_check_error("clamp_frame") );
}
//------------------------------------------------------------------------------
void RenderingContextImpl::update_variances(const uint32 instance)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
update_variances_kernel <<< gridSize, blockSize >>>(view(0), instance + 1);
CUDA_CHECK( cugar::cuda::sync_and_check_error("update_variances") );
}
// load a plugin
//
uint32 RenderingContextImpl::load_plugin(const char* plugin_name)
{
typedef uint32 (__stdcall *register_plugin_function)(RenderingContext& renderer);
fprintf(stderr, " loading plugin \"%s\"... started\n", plugin_name);
m_plugins.push_back(DLL(plugin_name));
register_plugin_function plugin_entry_function = (register_plugin_function)m_plugins.back().get_proc_address("register_plugin"); // use the DLL we just pushed; front() would always return the first plugin ever loaded
if (!plugin_entry_function)
{
fprintf(stderr, "failed loading plugin entry function!\n");
throw cugar::runtime_error("failed loading plugin entry function");
}
fprintf(stderr, " loading plugin \"%s\"... done\n", plugin_name);
fprintf(stderr, " initializing plugin \"%s\"... started\n", plugin_name);
const uint32 r = plugin_entry_function( *m_this );
fprintf(stderr, " initializing plugin \"%s\"... done\n", plugin_name);
return r;
}
//------------------------------------------------------------------------------
// RenderingContext initialization
//
void RenderingContextImpl::init(int argc, char** argv)
{
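// Initialization overview: register the built-in renderers, parse the command line,
// allocate the output buffers, load the precomputed glossy-reflectance and LTC tables,
// import the scene (.fa/.obj/.ply/.pbrt or via Assimp) and its textures, build the
// ray-tracing context, set up the sampling sequence, and finally initialize the renderer.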
const char* filename = NULL;
register_renderer("pt", &PathTracer::factory );
register_renderer("bpt", &BPT::factory );
register_renderer("cmlt", &CMLT::factory );
register_renderer("mlt", &MLT::factory );
register_renderer("pssmlt", &PSSMLT::factory );
register_renderer("rpt", &RPT::factory );
register_renderer("psfpt", &PSFPT::factory );
//register_renderer("hellopt", &HelloPT::factory );
m_renderer_type = kBPT;
m_exposure = 1.0f;
m_gamma = 2.2f;
m_res_x = 1600;
m_res_y = 900;
m_aspect = 0.0f;
m_shading_rate = 1.0f;
m_shading_mode = kShaded;
// set the directional light
m_light.dir = cugar::normalize(cugar::Vector3f(1.0f,-0.5f,1.0f));
m_light.color = cugar::Vector3f(22.0f,21.0f,18.0f)*4;
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-i") == 0)
filename = argv[++i];
else if (strcmp(argv[i], "-r") == 0 ||
strcmp(argv[i], "-res") == 0)
{
m_res_x = atoi(argv[++i]);
m_res_y = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-a") == 0 ||
strcmp(argv[i], "-aspect") == 0)
{
m_aspect = (float)atof(argv[++i]);
}
else if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
if (camera_file == NULL)
{
fprintf(stderr, "failed opening camera file %s\n", argv[i]);
exit(0);
}
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
}
else if (strcmp(argv[i], "-plugin") == 0)
{
m_renderer_type = load_plugin( argv[++i] );
m_renderer = m_renderer_factories[m_renderer_type]();
}
else if (argv[i][0] == '-')
{
for (uint32 r = 0; r < m_renderer_names.size(); ++r)
{
if (m_renderer_names[r] == argv[i]+1)
{
m_renderer_type = r;
m_renderer = m_renderer_factories[r]();
}
}
}
}
if (m_aspect == 0.0f)
m_aspect = float(m_res_x) / float(m_res_y);
if (filename == NULL)
{
fprintf(stderr, "options:\n");
fprintf(stderr, " -i scene.obj specify the input scene\n");
fprintf(stderr, " -r int int specify the resolution\n");
fprintf(stderr, " -a float specify the aspect ratio\n");
fprintf(stderr, " -c camera.txt specify a camera file\n");
fprintf(stderr, " -pt use the PT renderer\n");
fprintf(stderr, " -bpt use the BPT renderer\n");
fprintf(stderr, " -mlt use the MLT renderer\n");
fprintf(stderr, " -cmlt use the CMLT renderer\n");
fprintf(stderr, " -pssmlt use the PSSMLT renderer\n");
exit(0);
}
bool overwrite_camera = false;
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
if (camera_file == NULL)
{
fprintf(stderr, "failed opening camera file %s\n", argv[i]);
exit(0);
}
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
overwrite_camera = true;
}
}
m_rgba.alloc(m_res_x * m_res_y * 4);
m_var.alloc(m_res_x * m_res_y);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
fprintf(stderr, "cuda device: %s\n", prop.name);
fprintf(stderr, " SM version : %d.%d\n",
prop.major, prop.minor);
fprintf(stderr, " SM count : %d \n",
prop.multiProcessorCount);
fprintf(stderr, " SM clock : %d \n",
prop.clockRate);
fprintf(stderr, " mem clock : %d \n",
prop.memoryClockRate);
size_t free, total;
cudaMemGetInfo(&free, &total);
fprintf(stderr, " memory : %.3f GB\n",
float(total) / (1024 * 1024 * 1024));
std::vector<unsigned int> devices(1);
devices[0] = 0;
cudaSetDevice( devices[0] );
// make sure we do have a renderer
if (m_renderer == NULL)
m_renderer = PathTracer::factory();
const uint32 aux_channels = m_renderer->auxiliary_channel_count();
m_fb.set_channel_count(FBufferDesc::NUM_CHANNELS + aux_channels);
m_fb.set_channel(FBufferDesc::DIFFUSE_C, "diffuse_color");
m_fb.set_channel(FBufferDesc::DIFFUSE_A, "diffuse_albedo");
m_fb.set_channel(FBufferDesc::SPECULAR_C, "specular_color");
m_fb.set_channel(FBufferDesc::SPECULAR_A, "specular_albedo");
m_fb.set_channel(FBufferDesc::DIRECT_C, "direct_color");
m_fb.set_channel(FBufferDesc::COMPOSITED_C, "composited_color");
m_fb.set_channel(FBufferDesc::FILTERED_C, "filtered_color");
m_fb.set_channel(FBufferDesc::LUMINANCE, "luminance");
m_renderer->register_auxiliary_channels( m_fb, FBufferDesc::NUM_CHANNELS );
m_fb.resize(m_res_x, m_res_y);
m_fb_temp[0].resize(m_res_x, m_res_y);
m_fb_temp[1].resize(m_res_x, m_res_y);
m_fb_temp[2].resize(m_res_x, m_res_y);
m_fb_temp[3].resize(m_res_x, m_res_y);
#if 0
// pre-compute the samples buffer
m_samples.alloc(m_res_x * m_res_y);
{
DomainBuffer<RTP_BUFFER_TYPE_HOST, float2> samples(m_res_x * m_res_y);
cugar::MJSampler sampler;
sampler.sample(m_res_x, m_res_y, (cugar::Vector2f*)samples.ptr());
m_samples = samples;
}
#endif
// Load the glossy reflectance profile
{
fprintf(stderr, "initializing glossy reflectance profile... started\n");
DomainBuffer<HOST_BUFFER, float> glossy_reflectance;
const uint32 S = 32;
glossy_reflectance.alloc(S*S*S*S);
ScopedFile file("glossy_reflectance.dat", "rb");
if (!file)
{
fprintf(stderr, " error opening glossy_reflectance.dat\n");
exit(1);
}
if (fread(glossy_reflectance.ptr(), sizeof(float), S*S*S*S, file) != S*S*S*S)
{
fprintf(stderr, " error loading glossy_reflectance.dat\n");
exit(1);
}
m_glossy_reflectance = glossy_reflectance;
fprintf(stderr, "initializing glossy reflectance profile... done\n");
}
// Load the LTC coefficients
{
fprintf(stderr, "initializing LTC coefficients... started\n");
DomainBuffer<HOST_BUFFER, float4> ltc_M;
DomainBuffer<HOST_BUFFER, float4> ltc_Minv;
ltc_M.alloc(ltc_ggx::size * ltc_ggx::size);
ltc_Minv.alloc(ltc_ggx::size * ltc_ggx::size);
cugar::LTCBsdf::preprocess(ltc_ggx::size, (const cugar::Matrix3x3f*)ltc_ggx::tabM, ltc_M.ptr(), ltc_Minv.ptr());
m_ltc_size = ltc_ggx::size;
m_ltc_M = ltc_M;
m_ltc_Minv = ltc_Minv;
m_ltc_A.alloc(ltc_ggx::size * ltc_ggx::size);
m_ltc_A.copy_from(ltc_ggx::size * ltc_ggx::size, HOST_BUFFER, ltc_ggx::tabAmplitude);
fprintf(stderr, "initializing LTC coefficients... done\n");
}
fprintf(stderr, "loading mesh file %s... started\n", filename);
std::vector<std::string> scene_dirs;
{
scene_dirs.push_back(""); // always look in the current directory
char local_path[2048];
extract_path(filename, local_path);
scene_dirs.push_back(local_path);
}
// Create the Model object
//
try
{
std::vector<std::string> dirs = scene_dirs;
std::vector<Camera> cameras;
std::vector<DirectionalLight> dir_lights;
if (strlen(filename) > 3 && strcmp(filename+strlen(filename)-3, ".fa") == 0)
load_scene(filename, m_mesh, cameras, dir_lights, dirs, scene_dirs);
else if ((strlen(filename) > 4 && strcmp(filename+strlen(filename)-4, ".obj") == 0) ||
(strlen(filename) > 4 && strcmp(filename+strlen(filename)-4, ".ply") == 0))
loadModel(filename, m_mesh);
else if (strlen(filename) > 5 && strcmp(filename+strlen(filename)-5, ".pbrt") == 0)
{
pbrt::FermatImporter importer(filename, &m_mesh, &m_camera, &dir_lights, &scene_dirs);
pbrt::import(filename, &importer);
importer.finish();
// copy the film options
m_exposure = importer.m_film.exposure;
m_gamma = importer.m_film.gamma;
}
else
load_assimp(filename, m_mesh, dirs, scene_dirs);
// check whether we need to pick the loaded camera
if (cameras.size() && overwrite_camera == false)
m_camera = cameras[0];
// store directional lights on both host and device
m_dir_lights_h.alloc( dir_lights.size() );
m_dir_lights_h.copy_from( dir_lights.size(), HOST_BUFFER, &dir_lights.front() );
m_dir_lights_d = m_dir_lights_h;
// perform normal compression
m_mesh.compress_normals();
m_mesh.compress_tex();
#if UNIFIED_VERTEX_ATTRIBUTES
// unify vertex attributes
unify_vertex_attributes( m_mesh );
#endif
// apply material flags
apply_material_flags( m_mesh );
// compute the bbox
if (1)
{
cugar::Vector3f bmin(1.0e16f, 1.0e16f, 1.0e16f);
cugar::Vector3f bmax(-1.0e16f, -1.0e16f, -1.0e16f);
MeshView::vertex_type* v = reinterpret_cast<MeshView::vertex_type*>(m_mesh.getVertexData());
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
{
bmin = cugar::min(bmin, vertex_comp(v[i]));
bmax = cugar::max(bmax, vertex_comp(v[i]));
}
// print the bounding box
fprintf(stderr, " bbox[%f, %f, %f][%f, %f, %f]\n",
bmin[0], bmin[1], bmin[2],
bmax[0], bmax[1], bmax[2]);
}
}
catch (MeshException e)
{
fprintf(stderr, " error loading mesh file %s : %s\n", filename, e.what());
exit(1);
}
fprintf(stderr, "loading mesh file %s... done\n", filename);
fprintf(stderr, " triangles : %d\n", m_mesh.getNumTriangles());
fprintf(stderr, " vertices : %d\n", m_mesh.getNumVertices());
fprintf(stderr, " normals : %d\n", m_mesh.getNumNormals());
fprintf(stderr, " materials : %d\n", m_mesh.getNumMaterials());
fprintf(stderr, " groups : %d\n", m_mesh.getNumGroups());
{
// print the group names
for (int32 i = 0; i < m_mesh.getNumGroups(); ++i)
fprintf(stderr, " group[%d] : %s, %u triangles\n", i,
m_mesh.getGroupName(i).c_str(),
m_mesh.getGroupOffsets()[i + 1] - m_mesh.getGroupOffsets()[i]);
}
// load all textures
{
fprintf(stderr, "loading %u textures... started\n", (uint32)m_mesh.m_textures.size());
m_textures_h.resize( m_mesh.m_textures.size() );
m_textures_d.resize( m_mesh.m_textures.size() );
for (size_t i = 0; i < m_mesh.m_textures.size(); ++i)
{
m_textures_h[i] = HostMipMapStoragePtr(new MipMapStorage<HOST_BUFFER>());
m_textures_d[i] = DeviceMipMapStoragePtr(new MipMapStorage<CUDA_BUFFER>());
// try to load the texture
char local_path[2048];
extract_path(filename, local_path);
char texture_name[2048];
strcpy(texture_name, m_mesh.m_textures[i].c_str());
if (find_file(texture_name, scene_dirs))
{
if (strcmp(texture_name + strlen(texture_name) - 4, ".tga") == 0)
{
cugar::TGAHeader tga_header;
unsigned char* rgb = cugar::load_tga(texture_name, &tga_header);
if (rgb)
{
MipMapStorage<HOST_BUFFER>::TexturePtr texture_h(new TextureStorage<HOST_BUFFER>());
texture_h->resize(tga_header.width, tga_header.height);
float4* tex = texture_h->ptr();
for (uint32 p = 0; p < uint32(tga_header.width) * uint32(tga_header.height); ++p)
tex[p] = make_float4(
float(rgb[3 * p + 0]) / 255.0f,
float(rgb[3 * p + 1]) / 255.0f,
float(rgb[3 * p + 2]) / 255.0f,
0.0f);
// generate the mipmap for this texture
m_textures_h[i]->set(texture_h);
// and copy it to the device
*m_textures_d[i] = *m_textures_h[i];
delete[] rgb;
}
else
fprintf(stderr, "warning: unable to load texture %s\n", texture_name);
}
else if (strcmp(texture_name + strlen(texture_name) - 4, ".pfm") == 0)
{
uint32 width, height;
float* rgb = cugar::load_pfm(texture_name, &width, &height);
if (rgb)
{
MipMapStorage<HOST_BUFFER>::TexturePtr texture_h(new TextureStorage<HOST_BUFFER>());
texture_h->resize(width, height);
float4* tex = texture_h->ptr();
for (uint32 p = 0; p < width * height; ++p)
tex[p] = make_float4(
float(rgb[3 * p + 0]),
float(rgb[3 * p + 1]),
float(rgb[3 * p + 2]),
0.0f);
// generate the mipmap for this texture
m_textures_h[i]->set(texture_h);
// and copy it to the device
*m_textures_d[i] = *m_textures_h[i];
delete[] rgb;
}
else
fprintf(stderr, "warning: unable to load texture %s\n", texture_name);
}
else
fprintf(stderr, "warning: unsupported texture format %s\n", texture_name);
}
else
fprintf(stderr, "warning: unable to find texture %s\n", texture_name);
}
m_texture_views_h.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_h.size(); ++i)
m_texture_views_h.set(i, m_textures_h[i]->view());
m_texture_views_d.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_d.size(); ++i)
m_texture_views_d.set(i, m_textures_d[i]->view());
fprintf(stderr, "loading %u textures... done\n", (uint32)m_mesh.m_textures.size());
}
// checking materials
for (int32_t i = 0; i < m_mesh.getNumTriangles(); ++i)
{
const int m = m_mesh.getMaterialIndices()[i];
if (m < 0 || m >= m_mesh.getNumMaterials())
{
fprintf(stderr, "material[%u] : %u out of range\n", i, m);
exit(1);
}
}
#if 0
fprintf(stderr, "creating UV index... started\n");
{
// initialize a uv-bvh on the host
HostUVBvh uv_bvh;
build( &uv_bvh, m_mesh );
output_uv_tris( m_mesh );
// and copy it to the device
m_uv_bvh = uv_bvh;
}
fprintf(stderr, "creating UV index... done\n");
#endif
// copy to the device
m_mesh_d = m_mesh;
{
size_t mem_free, mem_tot;
cudaSetDevice(0);
cudaMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
fprintf(stderr, "creating RT index... started\n");
#if 1
m_rt_context = new RTContext();
m_rt_context->create_geometry(
m_mesh_d.getNumTriangles(),
m_mesh_d.getVertexIndices(),
m_mesh_d.getNumVertices(),
m_mesh_d.getVertexData(),
m_mesh_d.getNormalIndices(),
m_mesh_d.getNormalData(),
m_mesh_d.getTextureCoordinateIndices(),
m_mesh_d.getTextureCoordinateData(),
m_mesh_d.getMaterialIndices());
// setup the material buffer
m_rt_context->bind_buffer( "g_materials", m_mesh_d.getNumMaterials(), sizeof(MeshMaterial), m_mesh_d.m_materials.ptr(), RT_FORMAT_USER );
// setup texture buffers
//m_rt_context->bind_buffer( "g_textures", m_texture_views_d.count(), sizeof(MipMapView), m_texture_views_d.ptr(), RT_FORMAT_USER );
// perform a small test launch
//m_rt_context->launch(0,128);
#else
m_rt_context = NULL;
#endif
fprintf(stderr, "creating RT index... done\n");
const uint32 n_dimensions = 6 * 12;
const uint32 tiled_dim = 256;
fprintf(stderr, " initializing sampler: %u dimensions\n", n_dimensions);
m_sequence.setup(n_dimensions, tiled_dim);
fprintf(stderr, "initializing path sampler... started\n");
m_renderer->init(argc, argv, *m_this);
fprintf(stderr, "initializing path sampler... done\n");
{
size_t mem_free, mem_tot;
cudaSetDevice(0);
cudaMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
#if 0
cugar::host_vector<uint32_t> h_randoms(1024 * 1024);
for (uint32_t i = 0; i < 1024 * 1024; ++i)
h_randoms[i] = rand();
cugar::device_vector<uint32_t> d_randoms = h_randoms;
cugar::device_vector<uint32_t> d_vals = h_randoms;
cugar::device_vector<uint8_t> temp_storage;
cugar::radix_sort<cugar::device_tag>(1024 * 1024, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
for (uint32_t i = 0; i < 10; ++i)
{
d_randoms = h_randoms;
const uint32_t n_keys = (1u << (i + 1)) * 1024;
cugar::cuda::Timer timer;
timer.start();
cugar::radix_sort<cugar::device_tag>(n_keys, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
timer.stop();
fprintf(stderr, "%u K items : %.2fms\n", n_keys / 1024, timer.seconds() * 1000.0f);
}
#endif
}
void RenderingContextImpl::clear()
{
for (uint32_t c = 0; c < m_fb.channel_count(); ++c)
m_fb.channels[c].clear();
}
void RenderingContextImpl::update_model()
{
m_rt_context->create_geometry(
m_mesh_d.getNumTriangles(),
m_mesh_d.getVertexIndices(),
m_mesh_d.getNumVertices(),
m_mesh_d.getVertexData(),
m_mesh_d.getNormalIndices(),
m_mesh_d.getNormalData(),
m_mesh_d.getTextureCoordinateIndices(),
m_mesh_d.getTextureCoordinateData(),
m_mesh_d.getMaterialIndices());
// TODO: update m_mesh_lights if needed!
m_renderer->update_scene(*m_this);
// TODO: update the m_rt_context!
}
// register a new rendering interface type
//
uint32 RenderingContextImpl::register_renderer(const char* name, RendererFactoryFunction factory)
{
m_renderer_names.push_back( name );
m_renderer_factories.push_back( factory );
return uint32( m_renderer_factories.size() - 1 );
}
// RenderingContext display function
//
void RenderingContextImpl::render(const uint32 instance)
{
try
{
RenderingContextView renderer_view = view(instance);
// setup optix vars
m_rt_context->bind_var( "g_renderer", renderer_view );
// clear the primary Gbuffer
m_fb.gbuffer.clear();
//cudaDeviceSynchronize();
m_renderer->render(instance, *m_this);
// apply filtering, if enabled
if (m_shading_mode == kFiltered)
filter( instance );
to_rgba(renderer_view, m_rgba.ptr());
}
catch (cugar::cuda_error& error)
{
fprintf(stderr, "caught cuda error: %s\n", error.what());
exit(0);
}
}
RenderingContextView RenderingContextImpl::view(const uint32 instance)
{
RenderingContextView renderer_view(
m_camera,
(uint32)m_dir_lights_d.count(),
m_dir_lights_d.ptr(),
m_mesh_d.view(),
m_mesh_lights.view(false),
m_mesh_lights.view(true),
m_texture_views_d.ptr(),
m_ltc_size,
m_ltc_M.ptr(),
m_ltc_Minv.ptr(),
m_ltc_A.ptr(),
m_glossy_reflectance.ptr(),
m_res_x,
m_res_y,
m_aspect,
m_exposure,
m_gamma,
m_shading_rate,
m_shading_mode,
m_fb.view(),
instance );
return renderer_view;
}
// compute the scene's bbox
//
cugar::Bbox3f RenderingContextImpl::compute_bbox()
{
MeshView mesh_view = m_mesh.view();
cugar::Bbox3f bbox;
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
bbox.insert( load_vertex( mesh_view, i ) );
return bbox;
}
void RenderingContextImpl::filter(const uint32 instance)
{
// clear the output filter
m_fb.channels[FBufferDesc::FILTERED_C] = m_fb.channels[FBufferDesc::DIRECT_C];
FBufferChannelView output = m_fb.channels[FBufferDesc::FILTERED_C].view();
cugar::Vector3f U, V, W;
camera_frame( m_camera, m_aspect, U, V, W );
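// Reconstruction pass: FILTERED_C is seeded with the direct-lighting channel above, then the
// diffuse and specular channels are filtered separately, guided by the G-buffer and a
// box-filtered per-pixel variance estimate. EAW most likely refers to an edge-avoiding
// à-trous wavelet filter; the disabled #elif branch below is an alternative cross-bilateral
// (XBL) filter.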
#if 1
// setup some ping-pong buffers
FBufferChannelView pingpong[2];
pingpong[0] = m_fb_temp[0].view();
pingpong[1] = m_fb_temp[1].view();
EAWParams eaw_params;
eaw_params.phi_normal = /*sqrtf(float(instance + 1)) **/ 2.0f;
eaw_params.phi_position = /*sqrtf(float(instance + 1)) **/ 1.0f;
//eaw_params.phi_color = float(instance + 1) / 20.0f;
eaw_params.phi_color = float(instance*instance + 1) / 10000.0f;
eaw_params.E = m_camera.eye;
eaw_params.U = U;
eaw_params.V = V;
eaw_params.W = W;
const uint32 n_iterations = 7;
// filter the diffuse channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::DIFFUSE_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::DIFFUSE_A].view();
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
// filter the specular channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::SPECULAR_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::SPECULAR_A].view();
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
#elif 0
XBLParams xbl_params;
xbl_params.taps = 32;
xbl_params.phi_normal = 32.0f;
xbl_params.phi_position = 1.0f;
xbl_params.phi_color = 0.0f;
//xbl_params.phi_color = float(instance*instance + 1) / 10000.0f;
//eaw_params.phi_color = float(instance*instance + 1) / 10000.0f;
xbl_params.E = m_camera.eye;
xbl_params.U = U;
xbl_params.V = V;
xbl_params.W = W;
// filter the diffuse channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::DIFFUSE_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::DIFFUSE_A].view();
filter_variance(input, m_var.ptr(), 2);
XBL(
output, // destination
FilterOp(kFilterOpDemodulateInput | kFilterOpModulateOutput | kFilterOpAddMode),
weight, // weight
1.0e-4f, // min weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
xbl_params,
21u,
1u,
m_sequence.view());
}
// filter the specular channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::SPECULAR_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::SPECULAR_A].view();
filter_variance(input, m_var.ptr(), 2);
XBL(
output, // destination
FilterOp(kFilterOpDemodulateInput | kFilterOpModulateOutput | kFilterOpAddMode),
weight, // weight
1.0e-4f, // min weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
xbl_params,
21u,
1u,
m_sequence.view());
}
#endif
}
// constructor
//
RenderingContext::RenderingContext()
{
m_impl = new RenderingContextImpl( this );
}
// initialize the renderer
//
void RenderingContext::init(int argc, char** argv)
{
m_impl->init( argc, argv );
}
// render a frame
//
// \param instance the sequence instance / frame number in a progressive render
void RenderingContext::render(const uint32 instance)
{
m_impl->render( instance );
}
// clear all framebuffers
//
void RenderingContext::clear()
{
m_impl->clear();
}
// rescale the output framebuffer by a constant
//
void RenderingContext::multiply_frame(const float scale)
{
m_impl->multiply_frame( scale );
}
// rescale the output framebuffer by n/(n-1)
//
// \param instance the sequence instance / frame number in a progressive render, used for rescaling
void RenderingContext::rescale_frame(const uint32 instance)
{
m_impl->rescale_frame( instance );
}
// clamp the output framebuffer to a given maximum
//
// \param max_value
void RenderingContext::clamp_frame(const float max_value)
{
m_impl->clamp_frame( max_value );
}
// update the variance estimates
//
// \param instance the sequence instance / frame number in a progressive render, used for rescaling
void RenderingContext::update_variances(const uint32 instance)
{
m_impl->update_variances( instance );
}
// update the internal data-structures (e.g. BVHs) associated with the geometry
//
void RenderingContext::update_model()
{
m_impl->update_model();
}
// perform filtering
//
// \param instance the sequence instance / frame number in a progressive render
void RenderingContext::filter(const uint32 instance)
{
m_impl->filter( instance );
}
// return the current output resolution
//
uint2 RenderingContext::res() const { return m_impl->res(); }
// return a view of the renderer
//
RenderingContextView RenderingContext::view(const uint32 instance) { return m_impl->view( instance ); }
// return the camera
//
Camera& RenderingContext::get_camera() { return m_impl->get_camera(); }
// return the directional light count
//
uint32 RenderingContext::get_directional_light_count() const
{
return (uint32)m_impl->m_dir_lights_d.count();
}
// return the host-side directional lights
//
const DirectionalLight* RenderingContext::get_host_directional_lights() const
{
return m_impl->m_dir_lights_h.ptr();
}
// return the device-side directional lights
//
const DirectionalLight* RenderingContext::get_device_directional_lights() const
{
return m_impl->m_dir_lights_d.ptr();
}
// set the number of directional lights
//
void RenderingContext::set_directional_light_count(const uint32 count)
{
m_impl->m_dir_lights_h.alloc( count );
m_impl->m_dir_lights_d.alloc( count );
}
// set a directional light
//
void RenderingContext::set_directional_light(const uint32 i, const DirectionalLight& light)
{
m_impl->m_dir_lights_h.set( i, light );
m_impl->m_dir_lights_d.set( i, light );
}
// return the target resolution
//
uint2 RenderingContext::get_res() const { return m_impl->get_res(); }
// return the target aspect ratio
//
float RenderingContext::get_aspect_ratio() const { return m_impl->get_aspect_ratio(); }
// set the target aspect ratio
//
void RenderingContext::set_aspect_ratio(const float v) { m_impl->m_aspect = v; }
// return the target exposure
//
float RenderingContext::get_exposure() const { return m_impl->get_exposure(); }
// set the target exposure
//
void RenderingContext::set_exposure(const float v) { m_impl->m_exposure = v; }
// return the target gamma
//
float RenderingContext::get_gamma() const { return m_impl->m_gamma; }
// set the target gamma
//
void RenderingContext::set_gamma(const float v) { m_impl->m_gamma = v; }
// return the shading mode
//
ShadingMode& RenderingContext::get_shading_mode() { return m_impl->m_shading_mode; }
// return the frame buffer
//
FBufferStorage& RenderingContext::get_frame_buffer() { return m_impl->m_fb; }
// return the device-side 8-bit rgba buffer
//
uint8* RenderingContext::get_device_rgba_buffer() { return m_impl->m_rgba.ptr(); }
// return the number of textures
//
uint32 RenderingContext::get_texture_count() const { return uint32( m_impl->m_textures_h.size() ); }
// return the scene's host-side textures
//
RenderingContext::HostMipMapStoragePtr* RenderingContext::get_host_textures() { return &m_impl->m_textures_h.front(); }
// return the scene's device-side textures
//
RenderingContext::DeviceMipMapStoragePtr* RenderingContext::get_device_textures() { return &m_impl->m_textures_d.front(); }
// return the scene's host-side textures
//
MipMapView* RenderingContext::get_host_texture_views() { return m_impl->get_host_texture_views(); }
// return the scene's device-side textures
//
MipMapView* RenderingContext::get_device_texture_views() { return m_impl->get_device_texture_views(); }
// return the scene's host-side mesh
//
MeshStorage& RenderingContext::get_host_mesh() { return m_impl->get_host_mesh(); }
// return the scene's device-side mesh
//
DeviceMeshStorage& RenderingContext::get_device_mesh() { return m_impl->get_device_mesh(); }
// return the scene's device-side mesh emitters
//
MeshLightsStorage& RenderingContext::get_mesh_lights() { return m_impl->get_mesh_lights(); }
// return the ray tracing context
//
RTContext* RenderingContext::get_rt_context() const { return m_impl->get_rt_context(); }
// return the sampling sequence
//
TiledSequence& RenderingContext::get_sequence() { return m_impl->m_sequence; }
// return the renderer
//
RendererInterface* RenderingContext::get_renderer() const { return m_impl->get_renderer(); }
// register a new rendering interface type
//
uint32 RenderingContext::register_renderer(const char* name, RendererFactoryFunction factory)
{
return m_impl->register_renderer( name, factory );
}
// compute the scene's bbox
//
cugar::Bbox3f RenderingContext::compute_bbox() { return m_impl->compute_bbox(); }
|
4887d22864264e2e71a4abbc0fad5b52bdb374b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
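// Naive implementation: each thread computes one element of C as the dot product of a row
// of A and a column of B, reading both operands directly from global memory (no tiling).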
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows,
int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < numCRows) && (Col < numCColumns)) {
float Pvalue = 0;
for (int k = 0; k < numAColumns; ++k)
Pvalue += A[Row * numAColumns + k] * B[Col + k * numCColumns];
C[Row * numCColumns + Col] = Pvalue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
hostC = (float *)malloc(numCRows*numCColumns * sizeof(float));
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceA,numARows*numAColumns*sizeof(float));
hipMalloc((void**) &deviceB,numBRows*numBColumns*sizeof(float));
hipMalloc((void**) &deviceC,numCRows*numCColumns*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
int WIDTH = 4;
dim3 dimGrid(ceil(1.0*numCColumns/WIDTH),ceil(1.0*numCRows/WIDTH),1); // grid.x covers columns, grid.y covers rows; ceil guarantees full coverage when the size is not a multiple of WIDTH
dim3 dimBlock(WIDTH,WIDTH,1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows,
numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC,deviceC,numCRows*numCColumns*sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
} | 4887d22864264e2e71a4abbc0fad5b52bdb374b4.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
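// Naive implementation: each thread computes one element of C as the dot product of a row
// of A and a column of B, reading both operands directly from global memory (no tiling).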
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows,
int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < numCRows) && (Col < numCColumns)) {
float Pvalue = 0;
for (int k = 0; k < numAColumns; ++k)
Pvalue += A[Row * numAColumns + k] * B[Col + k * numCColumns];
C[Row * numCColumns + Col] = Pvalue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
hostC = (float *)malloc(numCRows*numCColumns * sizeof(float));
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceA,numARows*numAColumns*sizeof(float));
cudaMalloc((void**) &deviceB,numBRows*numBColumns*sizeof(float));
cudaMalloc((void**) &deviceC,numCRows*numCColumns*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
int WIDTH = 4;
dim3 dimGrid(ceil(1.0*numCColumns/WIDTH),ceil(1.0*numCRows/WIDTH),1); // grid.x covers columns, grid.y covers rows; ceil guarantees full coverage when the size is not a multiple of WIDTH
dim3 dimBlock(WIDTH,WIDTH,1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows,
numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC,deviceC,numCRows*numCColumns*sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
} |
0ef44f158e94b0402c759871877a1ad6e9bddc7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pin_object.h"
#include "utils/simple_serializer.h"
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/rigid_object_vector.h>
#include <mirheo/core/pvs/views/rov.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/mpi_types.h>
#include <mirheo/core/utils/quaternion.h>
namespace mirheo
{
namespace pin_object_kernels
{
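// restrictVelocities: launched with one block per object. The block reduces the particle
// forces to an object total and the particle velocities to an object mean, zeroes the
// corrections for components flagged Unrestricted, accumulates the removed force into
// totForces (indexed by global object id), and finally subtracts the per-particle share of
// that force from every particle while adding the velocity correction needed to reach
// targetVelocity.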
__global__ void restrictVelocities(OVview view, real3 targetVelocity, real4 *totForces)
{
int objId = blockIdx.x;
__shared__ real3 objTotForce, objVelocity;
objTotForce = make_real3(0.0_r);
objVelocity = make_real3(0.0_r);
__syncthreads();
// Find total force acting on the object and its velocity
real3 myf = make_real3(0), myv = make_real3(0);
for (int pid = threadIdx.x; pid < view.objSize; pid += blockDim.x)
{
myf += Real3_int(view.forces[pid + objId*view.objSize]).v;
myv += Real3_int(view.readVelocity(pid + objId*view.objSize)).v;
}
myf = warpReduce(myf, [] (real a, real b) { return a+b; });
myv = warpReduce(myv, [] (real a, real b) { return a+b; });
if (laneId() == 0)
{
atomicAdd(&objTotForce, myf);
atomicAdd(&objVelocity, myv / view.objSize); // Average, not simply sum
}
__syncthreads();
// Now only leave the components we need and save the force
if (threadIdx.x == 0)
{
// This is the velocity correction
objVelocity = targetVelocity - objVelocity;
if (targetVelocity.x == PinObjectPlugin::Unrestricted) { objTotForce.x = 0; objVelocity.x = 0; }
if (targetVelocity.y == PinObjectPlugin::Unrestricted) { objTotForce.y = 0; objVelocity.y = 0; }
if (targetVelocity.z == PinObjectPlugin::Unrestricted) { objTotForce.z = 0; objVelocity.z = 0; }
totForces[view.ids[objId]] += Real3_int(objTotForce, 0).toReal4();
objTotForce /= view.objSize;
}
__syncthreads();
// Finally change the original forces and velocities
// Velocities should be preserved anyway; they are only changed at the very
// beginning of the simulation
for (int pid = threadIdx.x; pid < view.objSize; pid += blockDim.x)
{
view.forces [pid + objId*view.objSize] -= Real3_int(objTotForce, 0).toReal4();
view.velocities[pid + objId*view.objSize] += Real3_int(objVelocity, 0).toReal4();
}
}
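// restrictRigidMotion: one thread per rigid object. For every component that is not flagged
// Unrestricted, the previous step's force/torque is accumulated into totForces/totTorques
// and the rigid motion (position, linear velocity, orientation quaternion, angular velocity)
// is overwritten so that the object follows the prescribed translation/rotation exactly.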
__global__ void restrictRigidMotion(ROVviewWithOldMotion view, real3 targetVelocity, real3 targetOmega, real dt, real4 *totForces, real4 *totTorques)
{
int objId = blockIdx.x * blockDim.x + threadIdx.x;
if (objId >= view.nObjects) return;
auto motion = view.motions [objId];
auto old_motion = view.old_motions[objId];
int globObjId = view.ids[objId];
#define VELOCITY_PER_DIM(dim) \
if (targetVelocity.dim != PinObjectPlugin::Unrestricted) \
{ \
totForces[globObjId].dim += old_motion.force.dim; \
motion.r.dim = old_motion.r.dim + targetVelocity.dim*dt; \
motion.vel.dim = targetVelocity.dim; \
}
VELOCITY_PER_DIM(x);
VELOCITY_PER_DIM(y);
VELOCITY_PER_DIM(z);
#undef VELOCITY_PER_DIM
// First filter out the invalid values
auto adjustedTargetOmega = old_motion.omega;
if (targetOmega.x != PinObjectPlugin::Unrestricted) adjustedTargetOmega.x = targetOmega.x;
if (targetOmega.y != PinObjectPlugin::Unrestricted) adjustedTargetOmega.y = targetOmega.y;
if (targetOmega.z != PinObjectPlugin::Unrestricted) adjustedTargetOmega.z = targetOmega.z;
// Next compute the corrected dq_dt and revert if necessary
auto dq_dt = old_motion.q.timeDerivative(adjustedTargetOmega);
#define OMEGA_PER_DIM(dim) \
if (targetOmega.dim != PinObjectPlugin::Unrestricted) \
{ \
totTorques[globObjId].dim += old_motion.torque.dim; \
motion.q.dim = old_motion.q.dim + dq_dt.dim*dt; \
motion.omega.dim = targetOmega.dim; \
}
OMEGA_PER_DIM(x);
OMEGA_PER_DIM(y);
OMEGA_PER_DIM(z);
#undef OMEGA_PER_DIM
motion.q.normalize();
view.motions[objId] = motion;
}
} // namespace pin_object_kernels::
PinObjectPlugin::PinObjectPlugin(const MirState *state, std::string name, std::string ovName, real3 translation, real3 rotation, int reportEvery) :
SimulationPlugin(state, name),
ovName_(ovName),
translation_(translation),
rotation_(rotation),
reportEvery_(reportEvery)
{}
void PinObjectPlugin::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
ov_ = simulation->getOVbyNameOrDie(ovName_);
const int myNObj = ov_->local()->getNumObjects();
int totObjs {0};
MPI_Check( MPI_Allreduce(&myNObj, &totObjs, 1, MPI_INT, MPI_SUM, comm) );
forces_.resize_anew(totObjs);
forces_.clear(defaultStream);
// Also check torques if object is rigid and if we need to restrict rotation
rov_ = dynamic_cast<RigidObjectVector*>(ov_);
if (rov_ != nullptr && (rotation_.x != Unrestricted || rotation_.y != Unrestricted || rotation_.z != Unrestricted))
{
torques_.resize_anew(totObjs);
torques_.clear(defaultStream);
}
info("Plugin '%s' is setup for OV '%s' and will impose the following velocity: [%s %s %s]; and following rotation: [%s %s %s]",
getCName(), ovName_.c_str(),
translation_.x == Unrestricted ? "?" : std::to_string(translation_.x).c_str(),
translation_.y == Unrestricted ? "?" : std::to_string(translation_.y).c_str(),
translation_.z == Unrestricted ? "?" : std::to_string(translation_.z).c_str(),
rotation_.x == Unrestricted ? "?" : std::to_string(rotation_.x).c_str(),
rotation_.y == Unrestricted ? "?" : std::to_string(rotation_.y).c_str(),
rotation_.z == Unrestricted ? "?" : std::to_string(rotation_.z).c_str() );
}
void PinObjectPlugin::handshake()
{
SimpleSerializer::serialize(sendBuffer_, ovName_);
_send(sendBuffer_);
}
void PinObjectPlugin::beforeIntegration(hipStream_t stream)
{
// If the object is not rigid, modify the forces
if (rov_ == nullptr)
{
debug("Restricting motion of OV '%s' as per plugin '%s'", ovName_.c_str(), getCName());
const int nthreads = 128;
OVview view(ov_, ov_->local());
SAFE_KERNEL_LAUNCH(
pin_object_kernels::restrictVelocities,
view.nObjects, nthreads, 0, stream,
view, translation_, forces_.devPtr() );
}
}
void PinObjectPlugin::afterIntegration(hipStream_t stream)
{
// If the object IS rigid, modify forces and torques
if (rov_ != nullptr)
{
debug("Restricting rigid motion of OV '%s' as per plugin '%s'", ovName_.c_str(), getCName());
const int nthreads = 32;
ROVviewWithOldMotion view(rov_, rov_->local());
SAFE_KERNEL_LAUNCH(
pin_object_kernels::restrictRigidMotion,
getNblocks(view.nObjects, nthreads), nthreads, 0, stream,
view, translation_, rotation_, getState()->dt,
forces_.devPtr(), torques_.devPtr() );
}
}
void PinObjectPlugin::serializeAndSend(hipStream_t stream)
{
count_++;
if (count_ % reportEvery_ != 0) return;
forces_.downloadFromDevice(stream);
if (rov_ != nullptr)
torques_.downloadFromDevice(stream);
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, reportEvery_, forces_, torques_);
_send(sendBuffer_);
forces_.clearDevice(stream);
torques_.clearDevice(stream);
}
ReportPinObjectPlugin::ReportPinObjectPlugin(std::string name, std::string path) :
PostprocessPlugin(name),
path_(makePath(path))
{}
void ReportPinObjectPlugin::setup(const MPI_Comm& comm, const MPI_Comm& interComm)
{
PostprocessPlugin::setup(comm, interComm);
activated_ = createFoldersCollective(comm, path_);
}
void ReportPinObjectPlugin::handshake()
{
auto req = waitData();
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
recv();
std::string ovName;
SimpleSerializer::deserialize(data_, ovName);
if (activated_ && rank_ == 0)
{
std::string fname = path_ + ovName + ".txt";
auto status = fout_.open(fname, "w" );
if (status != FileWrapper::Status::Success)
die("could not open file '%s'", fname.c_str());
}
}
void ReportPinObjectPlugin::deserialize()
{
std::vector<real4> forces, torques;
MirState::TimeType currentTime;
int nsamples;
SimpleSerializer::deserialize(data_, currentTime, nsamples, forces, torques);
MPI_Check( MPI_Reduce( (rank_ == 0 ? MPI_IN_PLACE : forces.data()), forces.data(), forces.size()*4, getMPIFloatType<real>(), MPI_SUM, 0, comm_) );
MPI_Check( MPI_Reduce( (rank_ == 0 ? MPI_IN_PLACE : torques.data()), torques.data(), torques.size()*4, getMPIFloatType<real>(), MPI_SUM, 0, comm_) );
if (activated_ && rank_ == 0)
{
for (size_t i = 0; i < forces.size(); ++i)
{
forces[i] /= nsamples;
fprintf(fout_.get(), "%lu %f %f %f %f",
i, currentTime, forces[i].x, forces[i].y, forces[i].z);
if (i < torques.size())
{
torques[i] /= nsamples;
fprintf(fout_.get(), " %f %f %f", torques[i].x, torques[i].y, torques[i].z);
}
fprintf(fout_.get(), "\n");
}
fflush(fout_.get());
}
}
} // namespace mirheo
| 0ef44f158e94b0402c759871877a1ad6e9bddc7a.cu | #include "pin_object.h"
#include "utils/simple_serializer.h"
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/rigid_object_vector.h>
#include <mirheo/core/pvs/views/rov.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/mpi_types.h>
#include <mirheo/core/utils/quaternion.h>
namespace mirheo
{
namespace pin_object_kernels
{
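// Runs with one block per object: the threads accumulate the object's total force and mean
// velocity, zero the components that are left unrestricted, store the absorbed force for
// reporting, and finally shift the per-particle forces and velocities towards the target.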
__global__ void restrictVelocities(OVview view, real3 targetVelocity, real4 *totForces)
{
int objId = blockIdx.x;
__shared__ real3 objTotForce, objVelocity;
objTotForce = make_real3(0.0_r);
objVelocity = make_real3(0.0_r);
__syncthreads();
// Find total force acting on the object and its velocity
real3 myf = make_real3(0), myv = make_real3(0);
for (int pid = threadIdx.x; pid < view.objSize; pid += blockDim.x)
{
myf += Real3_int(view.forces[pid + objId*view.objSize]).v;
myv += Real3_int(view.readVelocity(pid + objId*view.objSize)).v;
}
myf = warpReduce(myf, [] (real a, real b) { return a+b; });
myv = warpReduce(myv, [] (real a, real b) { return a+b; });
if (laneId() == 0)
{
atomicAdd(&objTotForce, myf);
atomicAdd(&objVelocity, myv / view.objSize); // Average, not simply sum
}
__syncthreads();
// Now only leave the components we need and save the force
if (threadIdx.x == 0)
{
// This is the velocity correction
objVelocity = targetVelocity - objVelocity;
if (targetVelocity.x == PinObjectPlugin::Unrestricted) { objTotForce.x = 0; objVelocity.x = 0; }
if (targetVelocity.y == PinObjectPlugin::Unrestricted) { objTotForce.y = 0; objVelocity.y = 0; }
if (targetVelocity.z == PinObjectPlugin::Unrestricted) { objTotForce.z = 0; objVelocity.z = 0; }
totForces[view.ids[objId]] += Real3_int(objTotForce, 0).toReal4();
objTotForce /= view.objSize;
}
__syncthreads();
// Finally change the original forces and velocities
// Velocities should be preserved anyways, only changed in the very
// beginning of the simulation
for (int pid = threadIdx.x; pid < view.objSize; pid += blockDim.x)
{
view.forces [pid + objId*view.objSize] -= Real3_int(objTotForce, 0).toReal4();
view.velocities[pid + objId*view.objSize] += Real3_int(objVelocity, 0).toReal4();
}
}
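// Runs with one thread per rigid object: along every restricted axis the integrated rigid
// motion is overwritten with the prescribed velocity/omega, and the force/torque absorbed by
// the constraint is accumulated for reporting.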
__global__ void restrictRigidMotion(ROVviewWithOldMotion view, real3 targetVelocity, real3 targetOmega, real dt, real4 *totForces, real4 *totTorques)
{
int objId = blockIdx.x * blockDim.x + threadIdx.x;
if (objId >= view.nObjects) return;
auto motion = view.motions [objId];
auto old_motion = view.old_motions[objId];
int globObjId = view.ids[objId];
#define VELOCITY_PER_DIM(dim) \
if (targetVelocity.dim != PinObjectPlugin::Unrestricted) \
{ \
totForces[globObjId].dim += old_motion.force.dim; \
motion.r.dim = old_motion.r.dim + targetVelocity.dim*dt; \
motion.vel.dim = targetVelocity.dim; \
}
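    // For example, VELOCITY_PER_DIM(x) expands to:
    //   if (targetVelocity.x != PinObjectPlugin::Unrestricted)
    //   {
    //       totForces[globObjId].x += old_motion.force.x;
    //       motion.r.x   = old_motion.r.x + targetVelocity.x*dt;
    //       motion.vel.x = targetVelocity.x;
    //   }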
VELOCITY_PER_DIM(x);
VELOCITY_PER_DIM(y);
VELOCITY_PER_DIM(z);
#undef VELOCITY_PER_DIM
// First filter out the invalid values
auto adjustedTargetOmega = old_motion.omega;
if (targetOmega.x != PinObjectPlugin::Unrestricted) adjustedTargetOmega.x = targetOmega.x;
if (targetOmega.y != PinObjectPlugin::Unrestricted) adjustedTargetOmega.y = targetOmega.y;
if (targetOmega.z != PinObjectPlugin::Unrestricted) adjustedTargetOmega.z = targetOmega.z;
// Next compute the corrected dq_dt and revert if necessary
auto dq_dt = old_motion.q.timeDerivative(adjustedTargetOmega);
#define OMEGA_PER_DIM(dim) \
if (targetOmega.dim != PinObjectPlugin::Unrestricted) \
{ \
totTorques[globObjId].dim += old_motion.torque.dim; \
motion.q.dim = old_motion.q.dim + dq_dt.dim*dt; \
motion.omega.dim = targetOmega.dim; \
}
OMEGA_PER_DIM(x);
OMEGA_PER_DIM(y);
OMEGA_PER_DIM(z);
#undef OMEGA_PER_DIM
motion.q.normalize();
view.motions[objId] = motion;
}
} // namespace pin_object_kernels::
PinObjectPlugin::PinObjectPlugin(const MirState *state, std::string name, std::string ovName, real3 translation, real3 rotation, int reportEvery) :
SimulationPlugin(state, name),
ovName_(ovName),
translation_(translation),
rotation_(rotation),
reportEvery_(reportEvery)
{}
void PinObjectPlugin::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
ov_ = simulation->getOVbyNameOrDie(ovName_);
const int myNObj = ov_->local()->getNumObjects();
int totObjs {0};
MPI_Check( MPI_Allreduce(&myNObj, &totObjs, 1, MPI_INT, MPI_SUM, comm) );
forces_.resize_anew(totObjs);
forces_.clear(defaultStream);
// Also check torques if object is rigid and if we need to restrict rotation
rov_ = dynamic_cast<RigidObjectVector*>(ov_);
if (rov_ != nullptr && (rotation_.x != Unrestricted || rotation_.y != Unrestricted || rotation_.z != Unrestricted))
{
torques_.resize_anew(totObjs);
torques_.clear(defaultStream);
}
info("Plugin '%s' is setup for OV '%s' and will impose the following velocity: [%s %s %s]; and following rotation: [%s %s %s]",
getCName(), ovName_.c_str(),
translation_.x == Unrestricted ? "?" : std::to_string(translation_.x).c_str(),
translation_.y == Unrestricted ? "?" : std::to_string(translation_.y).c_str(),
translation_.z == Unrestricted ? "?" : std::to_string(translation_.z).c_str(),
rotation_.x == Unrestricted ? "?" : std::to_string(rotation_.x).c_str(),
rotation_.y == Unrestricted ? "?" : std::to_string(rotation_.y).c_str(),
rotation_.z == Unrestricted ? "?" : std::to_string(rotation_.z).c_str() );
}
void PinObjectPlugin::handshake()
{
SimpleSerializer::serialize(sendBuffer_, ovName_);
_send(sendBuffer_);
}
void PinObjectPlugin::beforeIntegration(cudaStream_t stream)
{
// If the object is not rigid, modify the forces
if (rov_ == nullptr)
{
debug("Restricting motion of OV '%s' as per plugin '%s'", ovName_.c_str(), getCName());
const int nthreads = 128;
OVview view(ov_, ov_->local());
SAFE_KERNEL_LAUNCH(
pin_object_kernels::restrictVelocities,
view.nObjects, nthreads, 0, stream,
view, translation_, forces_.devPtr() );
}
}
void PinObjectPlugin::afterIntegration(cudaStream_t stream)
{
// If the object IS rigid, modify forces and torques
if (rov_ != nullptr)
{
debug("Restricting rigid motion of OV '%s' as per plugin '%s'", ovName_.c_str(), getCName());
const int nthreads = 32;
ROVviewWithOldMotion view(rov_, rov_->local());
SAFE_KERNEL_LAUNCH(
pin_object_kernels::restrictRigidMotion,
getNblocks(view.nObjects, nthreads), nthreads, 0, stream,
view, translation_, rotation_, getState()->dt,
forces_.devPtr(), torques_.devPtr() );
}
}
void PinObjectPlugin::serializeAndSend(cudaStream_t stream)
{
count_++;
if (count_ % reportEvery_ != 0) return;
forces_.downloadFromDevice(stream);
if (rov_ != nullptr)
torques_.downloadFromDevice(stream);
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, reportEvery_, forces_, torques_);
_send(sendBuffer_);
forces_.clearDevice(stream);
torques_.clearDevice(stream);
}
ReportPinObjectPlugin::ReportPinObjectPlugin(std::string name, std::string path) :
PostprocessPlugin(name),
path_(makePath(path))
{}
void ReportPinObjectPlugin::setup(const MPI_Comm& comm, const MPI_Comm& interComm)
{
PostprocessPlugin::setup(comm, interComm);
activated_ = createFoldersCollective(comm, path_);
}
void ReportPinObjectPlugin::handshake()
{
auto req = waitData();
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
recv();
std::string ovName;
SimpleSerializer::deserialize(data_, ovName);
if (activated_ && rank_ == 0)
{
std::string fname = path_ + ovName + ".txt";
auto status = fout_.open(fname, "w" );
if (status != FileWrapper::Status::Success)
die("could not open file '%s'", fname.c_str());
}
}
void ReportPinObjectPlugin::deserialize()
{
std::vector<real4> forces, torques;
MirState::TimeType currentTime;
int nsamples;
SimpleSerializer::deserialize(data_, currentTime, nsamples, forces, torques);
MPI_Check( MPI_Reduce( (rank_ == 0 ? MPI_IN_PLACE : forces.data()), forces.data(), forces.size()*4, getMPIFloatType<real>(), MPI_SUM, 0, comm_) );
MPI_Check( MPI_Reduce( (rank_ == 0 ? MPI_IN_PLACE : torques.data()), torques.data(), torques.size()*4, getMPIFloatType<real>(), MPI_SUM, 0, comm_) );
if (activated_ && rank_ == 0)
{
for (size_t i = 0; i < forces.size(); ++i)
{
forces[i] /= nsamples;
fprintf(fout_.get(), "%lu %f %f %f %f",
i, currentTime, forces[i].x, forces[i].y, forces[i].z);
if (i < torques.size())
{
torques[i] /= nsamples;
fprintf(fout_.get(), " %f %f %f", torques[i].x, torques[i].y, torques[i].z);
}
fprintf(fout_.get(), "\n");
}
fflush(fout_.get());
}
}
} // namespace mirheo
|
a2edec018af46dfe11b6a1a4b0ddff6e497adb27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
#include <iostream>
#include <string>
#include <fstream>
#include<stdlib.h>
#include <stdio.h>
#include<time.h>
#include<hip/device_functions.h>
#include<cuda.h>
#include<math.h>
using namespace std;
#define HOMEOSTASIS_CONSTANT 150
//currently using LIF for spike learning
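//homeostatic threshold adaptation: param[1] (the firing threshold) is shifted by
//HOMEOSTASIS_CONSTANT*(mean firing rate - target rate), so neurons spiking above the target
//become harder to fire and vice versa. Worked example (editor's sketch): a mean rate of
//0.01 spikes/step against a target of 0.005 gives delta_thres = 150*0.005 = 0.75.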
__global__ void update_threshold (Neuron *NeuronList, int network_size, float *log_total_spike, float target_frequency, int time){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
return;
}
if(NeuronList[index].type==2){
float frequency_mean = log_total_spike[index]/time;
float delta_thres = HOMEOSTASIS_CONSTANT*(frequency_mean-target_frequency);
NeuronList[index].param[1] = NeuronList[index].param[1] + delta_thres;
//printf("NeuronNo%d:%f] ", index, delta_thres);
}
else{
return;
}
}
__global__ void lateral_inhibition (Neuron *NeuronList, int network_size, int inhibit_time){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
return;
}
if(NeuronList[index].type==4){
return;
}
if(NeuronList[index].state[2]>0.1){
return;
}
//NeuronList[index].state[7] = inhibit_time; //
NeuronList[index].state[0] = NeuronList[index].state[0] - 7;//NeuronList[index].param[2]; //change mem potential to reset_value
//float *result = std::find(std::begin(NeuronList[index].state), std::end(NeuronList[index].state), 123);
printf("#");
}
__global__ void lateral_inhibition_2 (Neuron *NeuronList, int network_size, int inhibit_time, float start_depth, float end_depth){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
//printf("%d %d| ", index, network_size);
if(index>=network_size){
return;
}
if(NeuronList[index].type==4){
return;
}
if(NeuronList[index].state[2]>0.1){
//printf("******************%d*****************\n", index);
return;
}
if(NeuronList[index].param[7]<start_depth||NeuronList[index].param[7]>end_depth){
//printf("StartDepth:%f_End:%f__current:%f||", start_depth, end_depth, NeuronList[index].param[7]);
return;
}
//printf("%d | ", index);
NeuronList[index].state[7] = inhibit_time; //
NeuronList[index].state[0] = NeuronList[index].state[0] - 7;//NeuronList[index].param[2]; //change mem potential to reset_value
//float *result = std::find(std::begin(NeuronList[index].state), std::end(NeuronList[index].state), 123);
}
__global__ void read_learning_output (Neuron *NeuronList, int network_size){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
return;
}
//printf("|");
int i = 0;
while(NeuronList[index].connected_in[i] > 0.1){
if(NeuronList[index].connected_weight[i]>1.0){
printf("connection%d---->%d_has_changed_weight:%f\n",i,index,NeuronList[index].connected_weight[i]);
}
i++;
}
}
__global__ void lateral_inhibition_CNN (Neuron *NeuronList, int network_size, int inhibit_time, float *log_spike){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
//printf("network size: %d, Return on index: %d", network_size, index);
return;
}
if(NeuronList[index].type==4||NeuronList[index].type==5){
//printf("1.network size: %d, Return on index: %d", network_size, index);
return;
}
if(NeuronList[index].state[2]>0.1){
//printf("2.network size: %d, Return on index: %d", network_size, index);
return;
}
int fire_neuron_depth = (int)NeuronList[index].param[7];
if(log_spike[fire_neuron_depth]<0.5){
//printf("Neuron_index: %d, Return on: depth of %d has log value: %f.\n", index, fire_neuron_depth, log_spike[fire_neuron_depth]);
//return;
}
//NeuronList[index].state[7] = inhibit_time; //
NeuronList[index].state[0] = NeuronList[index].state[0] - 3;//NeuronList[index].param[2]; //change mem potential to reset_value
//float *result = std::find(std::begin(NeuronList[index].state), std::end(NeuronList[index].state), 123);
//printf("Depth of %f has log value: %f.", fire_neuron_depth, log_spike[fire_neuron_depth]);
}
void spiking_learning_drive(Neuron *NeuronList, int network_size, int inhibit_time, float *log_total_spike, float target_frequency, int time, float *log_spike, int current_layer, int function_select){
int SIZE_PER_SIDE = sqrt(network_size)+1;
dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1));
int output_neuron_size = OUTPUT_LAYER_NEURON_NUM - 1;
if(function_select==0){//run lateral_inhibition
hipLaunchKernelGGL(( lateral_inhibition), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, output_neuron_size, inhibit_time);
}
else if(function_select==1){//run update threshold
//printf("\nTIME is: %d\n", time);
hipLaunchKernelGGL(( update_threshold), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size, log_total_spike, target_frequency, time);
}
else if(function_select==2){
hipLaunchKernelGGL(( read_learning_output), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size);
}
else if(function_select==3){
hipLaunchKernelGGL(( lateral_inhibition_CNN), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size, inhibit_time, log_spike);
}
}
void spiking_learning_drive(Neuron *NeuronList, int network_size, int inhibit_time, float *log_total_spike, float target_frequency, int time, float *log_spike, int current_layer, CNN_struct *CNN_setttings, int function_select){
int SIZE_PER_SIDE = sqrt(network_size)+1;
dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1));
int output_neuron_size = OUTPUT_LAYER_NEURON_NUM - 1;
if(function_select==0){//run lateral_inhibition
hipLaunchKernelGGL(( lateral_inhibition), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, output_neuron_size, inhibit_time);
}
else if(function_select==1){//run update threshold
//printf("\nTIME is: %d\n", time);
hipLaunchKernelGGL(( update_threshold), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size, log_total_spike, target_frequency, time);
}
else if(function_select==2){
hipLaunchKernelGGL(( read_learning_output), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size);
}
else if(function_select==3){
hipLaunchKernelGGL(( lateral_inhibition_CNN), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size, inhibit_time, log_spike);
}
else if(function_select==4){
//printf("\nNEW INHIB RUN\n\n");
float start_depth = CNN_setttings->layer[current_layer].first_depth_id - 0.1;
float end_depth = CNN_setttings->layer[current_layer].last_depth_id + 0.1;
//printf("Start_depth: %f, end_depth: %f||", start_depth, end_depth);
hipLaunchKernelGGL(( lateral_inhibition_2), dim3(dimGrid), dim3(dimBlock), 0, 0, NeuronList, network_size, inhibit_time, start_depth, end_depth);
}
}
| a2edec018af46dfe11b6a1a4b0ddff6e497adb27.cu | #include "header.h"
#include <iostream>
#include <string>
#include <fstream>
#include<stdlib.h>
#include <stdio.h>
#include<time.h>
#include<device_functions.h>
#include<cuda.h>
#include<math.h>
using namespace std;
#define HOMEOSTASIS_CONSTANT 150
//currently using LIF for spike learning
__global__ void update_threshold (Neuron *NeuronList, int network_size, float *log_total_spike, float target_frequency, int time){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
return;
}
if(NeuronList[index].type==2){
float frequency_mean = log_total_spike[index]/time;
float delta_thres = HOMEOSTASIS_CONSTANT*(frequency_mean-target_frequency);
NeuronList[index].param[1] = NeuronList[index].param[1] + delta_thres;
//printf("NeuronNo%d:%f] ", index, delta_thres);
}
else{
return;
}
}
__global__ void lateral_inhibition (Neuron *NeuronList, int network_size, int inhibit_time){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
return;
}
if(NeuronList[index].type==4){
return;
}
if(NeuronList[index].state[2]>0.1){
return;
}
//NeuronList[index].state[7] = inhibit_time; //
NeuronList[index].state[0] = NeuronList[index].state[0] - 7;//NeuronList[index].param[2]; //change mem potential to reset_value
//float *result = std::find(std::begin(NeuronList[index].state), std::end(NeuronList[index].state), 123);
printf("#");
}
__global__ void lateral_inhibition_2 (Neuron *NeuronList, int network_size, int inhibit_time, float start_depth, float end_depth){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
//printf("%d %d| ", index, network_size);
if(index>=network_size){
return;
}
if(NeuronList[index].type==4){
return;
}
if(NeuronList[index].state[2]>0.1){
//printf("******************%d*****************\n", index);
return;
}
if(NeuronList[index].param[7]<start_depth||NeuronList[index].param[7]>end_depth){
//printf("StartDepth:%f_End:%f__current:%f||", start_depth, end_depth, NeuronList[index].param[7]);
return;
}
//printf("%d | ", index);
NeuronList[index].state[7] = inhibit_time; //
NeuronList[index].state[0] = NeuronList[index].state[0] - 7;//NeuronList[index].param[2]; //change mem potential to reset_value
//float *result = std::find(std::begin(NeuronList[index].state), std::end(NeuronList[index].state), 123);
}
__global__ void read_learning_output (Neuron *NeuronList, int network_size){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
return;
}
//printf("|");
int i = 0;
while(NeuronList[index].connected_in[i] > 0.1){
if(NeuronList[index].connected_weight[i]>1.0){
printf("connection%d---->%d_has_changed_weight:%f\n",i,index,NeuronList[index].connected_weight[i]);
}
i++;
}
}
__global__ void lateral_inhibition_CNN (Neuron *NeuronList, int network_size, int inhibit_time, float *log_spike){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index>=network_size){
//printf("network size: %d, Return on index: %d", network_size, index);
return;
}
if(NeuronList[index].type==4||NeuronList[index].type==5){
//printf("1.network size: %d, Return on index: %d", network_size, index);
return;
}
if(NeuronList[index].state[2]>0.1){
//printf("2.network size: %d, Return on index: %d", network_size, index);
return;
}
int fire_neuron_depth = (int)NeuronList[index].param[7];
if(log_spike[fire_neuron_depth]<0.5){
//printf("Neuron_index: %d, Return on: depth of %d has log value: %f.\n", index, fire_neuron_depth, log_spike[fire_neuron_depth]);
//return;
}
//NeuronList[index].state[7] = inhibit_time; //
NeuronList[index].state[0] = NeuronList[index].state[0] - 3;//NeuronList[index].param[2]; //change mem potential to reset_value
//float *result = std::find(std::begin(NeuronList[index].state), std::end(NeuronList[index].state), 123);
//printf("Depth of %f has log value: %f.", fire_neuron_depth, log_spike[fire_neuron_depth]);
}
void spiking_learning_drive(Neuron *NeuronList, int network_size, int inhibit_time, float *log_total_spike, float target_frequency, int time, float *log_spike, int current_layer, int function_select){
int SIZE_PER_SIDE = sqrt(network_size)+1;
dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1));
int output_neuron_size = OUTPUT_LAYER_NEURON_NUM - 1;
if(function_select==0){//run lateral_inhibition
lateral_inhibition<<<dimGrid, dimBlock>>>(NeuronList, output_neuron_size, inhibit_time);
}
else if(function_select==1){//run update threshold
//printf("\nTIME is: %d\n", time);
update_threshold<<<dimGrid, dimBlock>>>(NeuronList, network_size, log_total_spike, target_frequency, time);
}
else if(function_select==2){
read_learning_output<<<dimGrid, dimBlock>>>(NeuronList, network_size);
}
else if(function_select==3){
lateral_inhibition_CNN<<<dimGrid, dimBlock>>>(NeuronList, network_size, inhibit_time, log_spike);
}
}
void spiking_learning_drive(Neuron *NeuronList, int network_size, int inhibit_time, float *log_total_spike, float target_frequency, int time, float *log_spike, int current_layer, CNN_struct *CNN_setttings, int function_select){
int SIZE_PER_SIDE = sqrt(network_size)+1;
dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1));
int output_neuron_size = OUTPUT_LAYER_NEURON_NUM - 1;
if(function_select==0){//run lateral_inhibition
lateral_inhibition<<<dimGrid, dimBlock>>>(NeuronList, output_neuron_size, inhibit_time);
}
else if(function_select==1){//run update threshold
//printf("\nTIME is: %d\n", time);
update_threshold<<<dimGrid, dimBlock>>>(NeuronList, network_size, log_total_spike, target_frequency, time);
}
else if(function_select==2){
read_learning_output<<<dimGrid, dimBlock>>>(NeuronList, network_size);
}
else if(function_select==3){
lateral_inhibition_CNN<<<dimGrid, dimBlock>>>(NeuronList, network_size, inhibit_time, log_spike);
}
else if(function_select==4){
//printf("\nNEW INHIB RUN\n\n");
float start_depth = CNN_setttings->layer[current_layer].first_depth_id - 0.1;
float end_depth = CNN_setttings->layer[current_layer].last_depth_id + 0.1;
//printf("Start_depth: %f, end_depth: %f||", start_depth, end_depth);
lateral_inhibition_2<<<dimGrid, dimBlock>>>(NeuronList, network_size, inhibit_time, start_depth, end_depth);
}
}
|
01bf543de4e94239badbbf1f1571a284c4bd15e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This code illustrates the use of the GPU to perform vector addition on arbitrarily large vectors.
Author: Naga Kandasamy
Date modified: May 3, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <float.h>
/* Include kernel code during preprocessing step */
#include "vector_addition_kernel.cu"
#define THREAD_BLOCK_SIZE 128
#define NUM_THREAD_BLOCKS 240
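/* Editor's note: the kernel itself is defined in vector_addition_kernel.cu, which is not shown
   here. Since the launch below always uses a fixed grid of NUM_THREAD_BLOCKS blocks no matter
   how large num_elements is, the kernel presumably walks the vectors with a grid-stride loop,
   roughly (a sketch under that assumption, not the actual file contents):

       __global__ void vector_addition_kernel(float *a, float *b, float *c, int n)
       {
           int stride = gridDim.x * blockDim.x;
           for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
               c[i] = a[i] + b[i];
       }
*/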
void run_test(int);
void compute_on_device(float *, float *, float *, int);
void check_for_error(char const *);
extern "C" void compute_gold(float *, float *, float *, int);
int main(int argc, char **argv)
{
if (argc != 2) {
fprintf(stderr, "Usage: %s num-elements\n", argv[0]);
exit(EXIT_FAILURE);
}
int num_elements = atoi(argv[1]);
run_test(num_elements);
exit(EXIT_SUCCESS);
}
/* Perform vector addition on CPU and GPU */
void run_test(int num_elements)
{
float diff;
int i;
/* Allocate memory on CPU for input vectors A and B, and output vector C */
int vector_length = sizeof(float) * num_elements;
float *A = (float *)malloc(vector_length);
float *B = (float *)malloc(vector_length);
float *gold_result = (float *)malloc(vector_length); /* Result vector computed on CPU */
float *gpu_result = (float *)malloc(vector_length); /* Result vector computed on GPU */
/* Initialize input data to be integer values between 0 and 5 */
for (i = 0; i < num_elements; i++) {
A[i] = floorf(5 * (rand() / (float)RAND_MAX));
B[i] = floorf(5 * (rand() / (float)RAND_MAX));
}
/* Compute reference solution on CPU */
fprintf(stderr, "Adding vectors on the CPU\n");
compute_gold(A, B, gold_result, num_elements);
/* Compute result vector on GPU */
compute_on_device(A, B, gpu_result, num_elements);
/* Compute differences between the CPU and GPU results. */
diff = 0.0;
for (i = 0; i < num_elements; i++)
diff += fabsf(gold_result[i] - gpu_result[i]);
fprintf(stderr, "Difference between the CPU and GPU result: %f\n", diff);
/* Free the data structures. */
free((void *)A);
free((void *)B);
free((void *)gold_result);
free((void *)gpu_result);
exit(EXIT_SUCCESS);
}
/* Host side code.
Transfer vectors A and B from CPU to GPU, set up grid and
thread dimensions, execute kernel function, and copy result vector
back to CPU.
*/
void compute_on_device(float *A_on_host, float *B_on_host, float *gpu_result, int num_elements)
{
float *A_on_device = NULL;
float *B_on_device = NULL;
float *C_on_device = NULL;
/* Allocate space on GPU for vectors A and B, and copy contents of vectors to GPU */
hipMalloc((void **)&A_on_device, num_elements * sizeof(float));
hipMemcpy(A_on_device, A_on_host, num_elements * sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void **)&B_on_device, num_elements * sizeof(float));
hipMemcpy(B_on_device, B_on_host, num_elements * sizeof(float), hipMemcpyHostToDevice);
/* Allocate space for result vector on GPU */
hipMalloc((void **)&C_on_device, num_elements * sizeof(float));
/* Set up the execution grid on the GPU. */
int num_thread_blocks = NUM_THREAD_BLOCKS;
dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1); /* Set number of threads in the thread block */
fprintf(stderr, "Setting up a (%d x 1) execution grid\n", num_thread_blocks);
dim3 grid(num_thread_blocks, 1);
fprintf(stderr, "Adding vectors on the GPU\n");
/* Launch kernel with multiple thread blocks. The kernel call is non-blocking. */
hipLaunchKernelGGL(( vector_addition_kernel), dim3(grid), dim3(thread_block) , 0, 0, A_on_device, B_on_device, C_on_device, num_elements);
hipDeviceSynchronize(); /* Force CPU to wait for GPU to complete */
check_for_error("KERNEL FAILURE");
/* Copy result vector back from GPU and store */
hipMemcpy(gpu_result, C_on_device, num_elements * sizeof(float), hipMemcpyDeviceToHost);
/* Free memory on GPU */
hipFree(A_on_device);
hipFree(B_on_device);
hipFree(C_on_device);
}
/* Check for errors when executing the kernel */
void check_for_error(char const *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s (%s). \n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
 | 01bf543de4e94239badbbf1f1571a284c4bd15e3.cu | /* This code illustrates the use of the GPU to perform vector addition on arbitrarily large vectors.
Author: Naga Kandasamy
Date modified: May 3, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <float.h>
/* Include kernel code during preprocessing step */
#include "vector_addition_kernel.cu"
#define THREAD_BLOCK_SIZE 128
#define NUM_THREAD_BLOCKS 240
void run_test(int);
void compute_on_device(float *, float *, float *, int);
void check_for_error(char const *);
extern "C" void compute_gold(float *, float *, float *, int);
int main(int argc, char **argv)
{
if (argc != 2) {
fprintf(stderr, "Usage: %s num-elements\n", argv[0]);
exit(EXIT_FAILURE);
}
int num_elements = atoi(argv[1]);
run_test(num_elements);
exit(EXIT_SUCCESS);
}
/* Perform vector addition on CPU and GPU */
void run_test(int num_elements)
{
float diff;
int i;
/* Allocate memory on CPU for input vectors A and B, and output vector C */
int vector_length = sizeof(float) * num_elements;
float *A = (float *)malloc(vector_length);
float *B = (float *)malloc(vector_length);
float *gold_result = (float *)malloc(vector_length); /* Result vector computed on CPU */
float *gpu_result = (float *)malloc(vector_length); /* Result vector computed on GPU */
/* Initialize input data to be integer values between 0 and 5 */
for (i = 0; i < num_elements; i++) {
A[i] = floorf(5 * (rand() / (float)RAND_MAX));
B[i] = floorf(5 * (rand() / (float)RAND_MAX));
}
/* Compute reference solution on CPU */
fprintf(stderr, "Adding vectors on the CPU\n");
compute_gold(A, B, gold_result, num_elements);
/* Compute result vector on GPU */
compute_on_device(A, B, gpu_result, num_elements);
/* Compute differences between the CPU and GPU results. */
diff = 0.0;
for (i = 0; i < num_elements; i++)
diff += fabsf(gold_result[i] - gpu_result[i]);
fprintf(stderr, "Difference between the CPU and GPU result: %f\n", diff);
/* Free the data structures. */
free((void *)A);
free((void *)B);
free((void *)gold_result);
free((void *)gpu_result);
exit(EXIT_SUCCESS);
}
/* Host side code.
Transfer vectors A and B from CPU to GPU, set up grid and
thread dimensions, execute kernel function, and copy result vector
back to CPU.
*/
void compute_on_device(float *A_on_host, float *B_on_host, float *gpu_result, int num_elements)
{
float *A_on_device = NULL;
float *B_on_device = NULL;
float *C_on_device = NULL;
/* Allocate space on GPU for vectors A and B, and copy contents of vectors to GPU */
cudaMalloc((void **)&A_on_device, num_elements * sizeof(float));
cudaMemcpy(A_on_device, A_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void **)&B_on_device, num_elements * sizeof(float));
cudaMemcpy(B_on_device, B_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);
/* Allocate space for result vector on GPU */
cudaMalloc((void **)&C_on_device, num_elements * sizeof(float));
/* Set up the execution grid on the GPU. */
int num_thread_blocks = NUM_THREAD_BLOCKS;
dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1); /* Set number of threads in the thread block */
fprintf(stderr, "Setting up a (%d x 1) execution grid\n", num_thread_blocks);
dim3 grid(num_thread_blocks, 1);
fprintf(stderr, "Adding vectors on the GPU\n");
/* Launch kernel with multiple thread blocks. The kernel call is non-blocking. */
vector_addition_kernel<<< grid, thread_block >>>(A_on_device, B_on_device, C_on_device, num_elements);
cudaDeviceSynchronize(); /* Force CPU to wait for GPU to complete */
check_for_error("KERNEL FAILURE");
/* Copy result vector back from GPU and store */
cudaMemcpy(gpu_result, C_on_device, num_elements * sizeof(float), cudaMemcpyDeviceToHost);
/* Free memory on GPU */
cudaFree(A_on_device);
cudaFree(B_on_device);
cudaFree(C_on_device);
}
/* Check for errors when executing the kernel */
void check_for_error(char const *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s (%s). \n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
052ddfff901195682d14fd813331d481abd7065a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ ( 8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x, e.g. nextPow2(5) == 8, nextPow2(8) == 8 */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
// fprintf(stdout,"when s is %d,threads is %d,blocks is %d\n",n,threads,blocks);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation on the CPU, used to limit floating-point rounding error in the reference result */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
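  /* Each thread loads and adds two elements ("first add during global load"), then the block
     reduces its partial sums in shared memory with a sequential-addressing tree (the stride s
     is halved every step); thread 0 writes out the block's partial sum. */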
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = 2 * bid * blockDim.x + threadIdx.x;
if(i < n) {
    /* guard the second load: the last block may be only partially covered by the input */
    scratch[threadIdx.x] = g_idata[i] + ((i + blockDim.x < n) ? g_idata[i + blockDim.x] : 0);
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x/2; s > 0; s = s >> 1) {
/*
if (threadIdx.x >= s) {
break;
}
scratch[threadIdx.x] += scratch[threadIdx.x + s];
*/
if (threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_3, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 3;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
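  /* Multi-pass reduction: every launch collapses the current s partial sums in d_odata into
     roughly s / (2*threads) new partial sums, until a single value remains. With the default
     8M elements and 256 threads this goes 16384 -> 32 -> 1 partial sums. */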
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_3 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
  if(fabsf (h_odata - h_cpu) > 1e-5) {   /* fabsf: integer abs() would truncate the float difference */
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 052ddfff901195682d14fd813331d481abd7065a.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ ( 8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x, e.g. nextPow2(5) == 8, nextPow2(8) == 8 */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
// fprintf(stdout,"when s is %d,threads is %d,blocks is %d\n",n,threads,blocks);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation on the CPU, used to limit floating-point rounding error in the reference result */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = 2 * bid * blockDim.x + threadIdx.x;
if(i < n) {
    /* guard the second load: the last block may be only partially covered by the input */
    scratch[threadIdx.x] = g_idata[i] + ((i + blockDim.x < n) ? g_idata[i + blockDim.x] : 0);
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x/2; s > 0; s = s >> 1) {
/*
if (threadIdx.x >= s) {
break;
}
scratch[threadIdx.x] += scratch[threadIdx.x + s];
*/
if (threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_3, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 3;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
kernel3 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_3 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
  if(fabsf (h_odata - h_cpu) > 1e-5) {   /* fabsf: integer abs() would truncate the float difference */
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
c4a088523f942b5cd271fe9091ec661061453043.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "UpSample.hpp"
#include "common.hpp"
template <typename T>
__global__ void KernelResizeNearest(const T* in_data, int count, int channel,
int in_h, int in_w, int out_h, int out_w,
T* out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int temp = globalid / out_w;
int w_out = globalid % out_w;
int h_out = temp % out_h;
temp = temp / out_h;
int c_out = temp % channel;
int b_out = temp / channel;
float fh = static_cast<float>(in_h) / out_h;
float fw = static_cast<float>(in_w) / out_w;
int src_h = static_cast<int>(h_out * fh);
int src_w = static_cast<int>(w_out * fw);
int src_index = ((b_out * channel + c_out) * in_h + src_h) * in_w + src_w;
out_data[globalid] = in_data[src_index];
}
}
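// Note on the bilinear kernel below: source coordinates follow the half-pixel-center
// convention, src = (dst + 0.5) * (in/out) - 0.5, and the result is the usual 4-tap blend
// (1-sh)(1-sw)*p(h,w) + sh(1-sw)*p(h+1,w) + (1-sh)sw*p(h,w+1) + sh*sw*p(h+1,w+1), with src_h
// and src_w clamped so the +1 neighbours stay inside the image. For a 2x upscale, output
// row 1 maps to source coordinate 0.25, i.e. a 75%/25% blend of source rows 0 and 1.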
template <typename T>
__global__ void KernelResizeBilinear(const T* in_data, int count, int channel,
int in_h, int in_w, int out_h, int out_w,
T* out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int temp = globalid / out_w;
int w_out = globalid % out_w;
int h_out = temp % out_h;
temp = temp / out_h;
int c_out = temp % channel;
int b_out = temp / channel;
float fh = static_cast<float>(in_h) / out_h;
float fw = static_cast<float>(in_w) / out_w;
float src_h_f = (h_out + 0.5f) * fh - 0.5f;
int src_h = static_cast<int>(src_h_f);
float sh = src_h_f - src_h;
src_h = src_h < in_h - 2 ? src_h : in_h - 2;
src_h = src_h < 0 ? 0 : src_h;
float src_w_f = (w_out + 0.5f) * fw - 0.5f;
int src_w = static_cast<int>(src_w_f);
float sw = src_w_f - src_w;
src_w = src_w < in_w - 2 ? src_w : in_w - 2;
src_w = src_w < 0 ? 0 : src_w;
int src_index_0 = ((b_out * channel + c_out) * in_h + src_h) * in_w + src_w;
int src_index_1 =
((b_out * channel + c_out) * in_h + src_h + 1) * in_w + src_w;
int src_index_2 =
((b_out * channel + c_out) * in_h + src_h) * in_w + src_w + 1;
int src_index_3 =
((b_out * channel + c_out) * in_h + src_h + 1) * in_w + src_w + 1;
out_data[globalid] = static_cast<T>(
(1 - sh) * (1 - sw) * in_data[src_index_0] +
sh * (1 - sw) * in_data[src_index_1] +
(1 - sh) * sw * in_data[src_index_2] + sh * sw * in_data[src_index_3]);
}
}
template <typename T>
void Resize(const T* in_data, const vector<int>& in_shape, int type,
const vector<int>& out_shape, T* out_data) {
int batch = in_shape[0], channel = in_shape[1];
int in_h = in_shape[2], in_w = in_shape[3];
int out_h = out_shape[2], out_w = out_shape[3];
int count = batch * channel * out_h * out_w;
if (type == 0) {
hipLaunchKernelGGL(( KernelResizeNearest<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0,
in_data, count, channel, in_h, in_w, out_h, out_w, out_data);
} else if (type == 1) {
hipLaunchKernelGGL(( KernelResizeBilinear<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0,
in_data, count, channel, in_h, in_w, out_h, out_w, out_data);
}
//hipPeekAtLastError();
}
template void Resize(const float* in_data, const vector<int>& in_shape, int type,
const vector<int>& out_shape, float* out_data);
| c4a088523f942b5cd271fe9091ec661061453043.cu | #include "UpSample.hpp"
#include "common.hpp"
template <typename T>
__global__ void KernelResizeNearest(const T* in_data, int count, int channel,
int in_h, int in_w, int out_h, int out_w,
T* out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int temp = globalid / out_w;
int w_out = globalid % out_w;
int h_out = temp % out_h;
temp = temp / out_h;
int c_out = temp % channel;
int b_out = temp / channel;
float fh = static_cast<float>(in_h) / out_h;
float fw = static_cast<float>(in_w) / out_w;
int src_h = static_cast<int>(h_out * fh);
int src_w = static_cast<int>(w_out * fw);
int src_index = ((b_out * channel + c_out) * in_h + src_h) * in_w + src_w;
out_data[globalid] = in_data[src_index];
}
}
template <typename T>
__global__ void KernelResizeBilinear(const T* in_data, int count, int channel,
int in_h, int in_w, int out_h, int out_w,
T* out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int temp = globalid / out_w;
int w_out = globalid % out_w;
int h_out = temp % out_h;
temp = temp / out_h;
int c_out = temp % channel;
int b_out = temp / channel;
float fh = static_cast<float>(in_h) / out_h;
float fw = static_cast<float>(in_w) / out_w;
float src_h_f = (h_out + 0.5f) * fh - 0.5f;
int src_h = static_cast<int>(src_h_f);
float sh = src_h_f - src_h;
src_h = src_h < in_h - 2 ? src_h : in_h - 2;
src_h = src_h < 0 ? 0 : src_h;
float src_w_f = (w_out + 0.5f) * fw - 0.5f;
int src_w = static_cast<int>(src_w_f);
float sw = src_w_f - src_w;
src_w = src_w < in_w - 2 ? src_w : in_w - 2;
src_w = src_w < 0 ? 0 : src_w;
int src_index_0 = ((b_out * channel + c_out) * in_h + src_h) * in_w + src_w;
int src_index_1 =
((b_out * channel + c_out) * in_h + src_h + 1) * in_w + src_w;
int src_index_2 =
((b_out * channel + c_out) * in_h + src_h) * in_w + src_w + 1;
int src_index_3 =
((b_out * channel + c_out) * in_h + src_h + 1) * in_w + src_w + 1;
out_data[globalid] = static_cast<T>(
(1 - sh) * (1 - sw) * in_data[src_index_0] +
sh * (1 - sw) * in_data[src_index_1] +
(1 - sh) * sw * in_data[src_index_2] + sh * sw * in_data[src_index_3]);
}
}
template <typename T>
void Resize(const T* in_data, const vector<int>& in_shape, int type,
const vector<int>& out_shape, T* out_data) {
int batch = in_shape[0], channel = in_shape[1];
int in_h = in_shape[2], in_w = in_shape[3];
int out_h = out_shape[2], out_w = out_shape[3];
int count = batch * channel * out_h * out_w;
if (type == 0) {
KernelResizeNearest<T><<<GetBlocks(count), NumThreads>>>(
in_data, count, channel, in_h, in_w, out_h, out_w, out_data);
} else if (type == 1) {
KernelResizeBilinear<T><<<GetBlocks(count), NumThreads>>>(
in_data, count, channel, in_h, in_w, out_h, out_w, out_data);
}
//cudaPeekAtLastError();
}
template void Resize(const float* in_data, const vector<int>& in_shape, int type,
const vector<int>& out_shape, float* out_data);
|
602844bf3a4ceaa8d6dcf9030a52e2f03c26dd7e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
//<seqan / parallel.h>
#include <thrust/random.h>
#include <cmath>
#include <vector>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#define DEG2RAD PI/180.f
#define Tess 0
#define Blending 0
struct VertexIn {
glm::vec3 pos;
glm::vec3 nor;
glm::vec3 col;
// TODO (optional) add other vertex attributes (e.g. texture coordinates)
};
struct VertexOut {
// TODO
glm::vec3 pos;
glm::vec3 nor;
glm::vec3 col;
};
struct Triangle {
VertexOut v[3];
};
struct Fragment {
int dis;
glm::vec3 color;
glm::vec3 normal;
glm::vec3 pos;
glm::vec3 subcolor[4];
int subdis[4];
};
int N = 0;
int M = 0;
int mat = 0;
int dev = 0;
static int width = 0;
static int height = 0;
static int *dev_bufIdx = NULL;
static VertexIn *dev_bufVertex = NULL;
static VertexOut *dev_vsOutput = NULL;
static Triangle *dev_primitives = NULL;
static Fragment *dev_depthbuffer = NULL;
static Fragment *dev_fmInput = NULL;
static Fragment *dev_fmOutput = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
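// Integer hash (a Wang/Jenkins-style bit mixer) used below to scramble (iter, index, depth)
// into a seed for thrust's default_random_engine, so that neighbouring threads do not get
// correlated random sequences.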
__host__ __device__ inline unsigned int utilhash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
__global__ void cleanDepth(Fragment* dev_depthbuffer, Fragment* dev_fmInput, int w, int h)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
//float t = INFINITY;
if (x < w && y < h)
{
dev_depthbuffer[index].color = glm::vec3(1, 0, 0);
		dev_depthbuffer[index].dis = 0x7FFFFFFF;	//dis is an int: initialize with INT_MAX rather than the float INFINITY
dev_depthbuffer[index].normal = glm::vec3(0, 1, 0);
dev_fmInput[index].normal = glm::vec3(0, 1, 0);
		dev_fmInput[index].dis = 0x7FFFFFFF;	//dis is an int: initialize with INT_MAX rather than the float INFINITY
dev_fmInput[index].color = glm::vec3(1, 1, 1);
dev_fmInput[index].normal = glm::vec3(0, 1, 0);
}
}
// Writes fragment colors to the framebuffer
__global__ void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
framebuffer[index] = depthbuffer[index].color;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_depthbuffer);
hipMalloc(&dev_depthbuffer, width * height * sizeof(Fragment));
hipMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_fmInput);
hipMalloc(&dev_fmInput, 4 * width * height * sizeof(Fragment));
hipMemset(dev_fmInput, 0, 4 * width * height * sizeof(Fragment));
hipFree(dev_fmOutput);
hipMalloc(&dev_fmOutput, width * height * sizeof(Fragment));
hipMemset(dev_fmOutput, 0, width * height * sizeof(Fragment));
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
void rasterizeSetBuffers(
int _bufIdxSize, int *bufIdx,
int _vertCount, float *bufPos, float *bufNor, float *bufCol, bool resselation) {
//********************
resselation = Tess;
//********************
bufIdxSize = _bufIdxSize;
vertCount = _vertCount;
hipFree(dev_bufIdx);
hipMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
hipMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), hipMemcpyHostToDevice);
VertexIn *bufVertex = new VertexIn[_vertCount];
float maxv = -1.f;
for (int i = 0; i < vertCount; i++) {
int j = i * 3;
bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
//***********check here....*******//
		float temp = ::max(bufVertex[i].pos.x, ::max(bufVertex[i].pos.y, bufVertex[i].pos.z));	//take the max over x, y and z (the original compared pos.y with itself and ignored z)
if (temp>maxv){ maxv = temp; }
}
N = (int)maxv + 1;
hipFree(dev_bufVertex);
hipMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
hipMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), hipMemcpyHostToDevice);
hipFree(dev_vsOutput);
hipMalloc(&dev_vsOutput, vertCount * sizeof(VertexOut));
if (!resselation)
{
hipFree(dev_primitives);
hipMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle));
hipMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle));
checkCUDAError("rasterizeSetBuffers");
}
else
{
hipFree(dev_primitives);
hipMalloc(&dev_primitives, vertCount / 3 * 4 * sizeof(Triangle));
hipMemset(dev_primitives, 0, vertCount / 3 * 4 * sizeof(Triangle));
checkCUDAError("rasterizeSetBuffers");
}
}
__global__ void vertexShader(VertexIn *dev_bufVertex, VertexOut *dev_vsOutput, int vertexCount, glm::mat4 ViewProj){
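	//Positions and normals are both transformed with ViewProj; note that using the full matrix
	//for normals is only strictly correct for rigid transforms (a general transform would need
	//the inverse-transpose), and the commented-out lines keep the original pass-through variant.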
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexCount){
		//simple orthographic (pass-through) projection, kept below for reference
//dev_vsOutput[id].pos = dev_bufVertex[id].pos;
//dev_vsOutput[id].nor = dev_bufVertex[id].nor;
dev_vsOutput[id].pos = multiplyMV(ViewProj, glm::vec4(dev_bufVertex[id].pos, 1));
dev_vsOutput[id].nor = multiplyMV(ViewProj, glm::vec4(dev_bufVertex[id].nor, 0));
dev_vsOutput[id].nor = glm::normalize(dev_vsOutput[id].nor);
dev_vsOutput[id].col = glm::vec3(1, 0, 0);
//dev_vsOutput[id].col = dev_bufVertex[id].col;
//interpolate the normal:smooth normal color??
}
}
__global__ void PrimitiveAssembly(VertexOut *dev_vsOutput, Triangle * dev_primitives, int verCount)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < verCount / 3){
dev_primitives[id].v[0].pos = dev_vsOutput[3 * id].pos;//012,345,678
dev_primitives[id].v[1].pos = dev_vsOutput[3 * id + 1].pos;
dev_primitives[id].v[2].pos = dev_vsOutput[3 * id + 2].pos;
dev_primitives[id].v[0].nor = dev_vsOutput[3 * id].nor;//012,345,678
dev_primitives[id].v[1].nor = dev_vsOutput[3 * id + 1].nor;
dev_primitives[id].v[2].nor = dev_vsOutput[3 * id + 2].nor;
dev_primitives[id].v[0].col = dev_vsOutput[3 * id].col;//012,345,678
dev_primitives[id].v[1].col = dev_vsOutput[3 * id + 1].col;
dev_primitives[id].v[2].col = dev_vsOutput[3 * id + 2].col;
}
}
__host__ __device__ bool fequal(float a, float b){
	if (a > b - 0.000001 && a < b + 0.000001){ return true; }
else return false;
}
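//_atomicMin below implements atomicMin through the classic CUDA compare-and-swap loop:
//re-read the stored value, try to swap in the smaller one with atomicCAS, and retry until no
//other thread intervened; it returns the previous value and exits early when the stored value
//is already <= val.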
__device__ int _atomicMin(int *addr, int val)
{
int old = *addr, assumed;
if (old <= val) return old;
do{
assumed = old;
old = atomicCAS(addr, assumed, val);
} while (old != assumed);
return old;
}
/*{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
			//because the image is a cube anyway... multiplying should give a better result than dividing...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i += 0.5){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j += 0.5){
if (tri[0].x > w || tri[0].x < 0 || tri[0].y>h || tri[0].x < 0)
{
//color[i*w + j].color = glm::vec3(0, 0, 0);//black
} //anti-aliansing..multisampling the patern 4 sample every pixel
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryc))
{
int intdepth = getZAtCoordinate(baryc, tri);
int dis;
_atomicMin(&dis, intdepth);
if (intdepth == dis){
dev_fmInput[i*w + j].subcolor[k] = dev_primitives[id].v[0].nor;
}
dev_fmInput[i*w + j].pos = dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
}
}
}
//else //pixel have more than 1 color
//{
/* glm::vec3 baryc_p[4];
int intdepth_s[4];
for (int p = 0; p < 4; p++)
{
baryc_p[p] = calculateBarycentricCoordinate(tri, random_point[p]);
if (isBarycentricCoordInBounds(baryc_p[p])){
intdepth_s[p] = getZAtCoordinate(baryc_p[p], tri);
_atomicMin(&dev_fmInput[i*w + j].subdis[p], intdepth_s[p]);
if (intdepth_s[p] == dev_fmInput[i*w + j].subdis[p]){
dev_fmInput[i*w + j].subcolor[p] = dev_primitives[id].v[0].nor;;
}
}
}
dev_fmInput[i*w + j].pos = dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
// }
}
}
}
}
//dev_primitives, dev_fmInput*4, dev_fmOutput
/*__global__ void rasterization(Triangle * dev_primitives, Fragment *dev_fmInput, int vertexcount, int w, int h, int N)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
		//optimized bounding box
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){//(-1,1)+1*w/2
//(-10,10)+10*w/20
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
			//because the image is a cube anyway... multiplying should give a better result than dividing...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i++){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j++){
if (tri[0].x > w || tri[0].x < 0 || tri[0].y>h || tri[0].x < 0)
{
//color[i*w + j].color = glm::vec3(0, 0, 0);//black
} //anti-aliansing..multisampling the patern 4 sample every pixel
glm::vec2 point(i + 0.5, j + 0.5);
thrust::default_random_engine rngx = makeSeededRandomEngine(i, id, 1);
thrust::default_random_engine rngy = makeSeededRandomEngine(j, id, 1);
thrust::uniform_real_distribution<float> u1(0, 0.5);
thrust::uniform_real_distribution<float> u2(0.5, 0.999);
glm::vec2 random_point[4];
int number = 0;
//random_point[0].x = i + u1(rngx);//-1,1
//random_point[0].y = j + u1(rngy);
//random_point[1].x = i + u2(rngx);//-1,-1
//random_point[1].y = j + u1(rngy);
//random_point[2].x = i + u1(rngx);//1,1
//random_point[2].y = j + u2(rngy);
//random_point[3].x = i + u2(rngx);//i+0+0.22,i+
//random_point[3].y = j + u2(rngy);
random_point[0].x = i + 0.25;//-1,1
random_point[0].y = j + 0.25;
random_point[1].x = i + 0.25;//-1,-1
random_point[1].y = j + 0.75;
random_point[2].x = i + 0.75;//1,1
random_point[2].y = j + 0.25;
random_point[3].x = i + 0.75;//i+0+0.22,i+
random_point[3].y = j + 0.75;
for (int t = 0; t < 4;t++){
glm::vec3 baryc_sub = calculateBarycentricCoordinate(tri, random_point[t]);
if (isBarycentricCoordInBounds(baryc_sub))
{
number++;
}
}
/* if (number == 4)//all in
{
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryc)){
int intdepth = getZAtCoordinate(baryc, tri);
_atomicMin(&(dev_fmInput[i*w + j].dis), intdepth);
if (intdepth == dev_fmInput[i*w + j].dis){
for (int k = 0; k < 4; k++){
dev_fmInput[i*w + j].subcolor[k] = dev_primitives[id].v[0].nor;
}
dev_fmInput[i*w + j].pos= dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
}
}
}*/
//else //pixel have more than 1 color
//{
/* glm::vec3 baryc_p[4];
int intdepth_s[4];
for (int p = 0; p < 4; p++)
{
baryc_p[p] = calculateBarycentricCoordinate(tri, random_point[p]);
if (isBarycentricCoordInBounds(baryc_p[p])){
intdepth_s[p] = getZAtCoordinate(baryc_p[p], tri);
_atomicMin(&dev_fmInput[i*w + j].subdis[p], intdepth_s[p]);
if (intdepth_s[p] == dev_fmInput[i*w + j].subdis[p]){
dev_fmInput[i*w + j].subcolor[p] = dev_primitives[id].v[0].nor;;
}
}
}
dev_fmInput[i*w + j].pos = dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
// }
}
}
}
}*/
/*__global__ void rasterization(Triangle * dev_primitives, Fragment *dev_fmInput, int vertexcount, int w, int h, int N)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
//potimized boundingbox;
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){//(-1,1)+1*w/2
//(-10,10)+10*w/20
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
//because the image is cube anyway...I think multiply should have better result than devide...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i++){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j++){
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
//simple clip..
if (tri[0].x > w || tri[0].x < 0 || tri[0].y>h || tri[0].x < 0)continue;
if (isBarycentricCoordInBounds(baryc)){
//these three normal should be the same since they are on the same face (checked)
int intdepth = (int)getZAtCoordinate(baryc, tri);
//atomicMin(int* address, int val)
//reads word old located at the address, computes the minimum of old and val,
//and stores the result back to memory at the same address. returns old
atomicMin(&dev_fmInput[i*w + j].dis, intdepth);
if (dev_fmInput[i*w + j].dis == intdepth){
dev_fmInput[i*w + j].color = dev_primitives[id].v[0].col;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
dev_fmInput[i*w + j].pos = (dev_primitives[id].v[0].pos + dev_primitives[id].v[1].pos + dev_primitives[id].v[2].pos) / 3.f;
}
}
}
}
}
}*/
__global__ void rasterization(Triangle * dev_primitives, Fragment *dev_fmInput, int vertexcount, int w, int h, int N)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
//optimized bounding box;
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){//(-1,1)+1*w/2
//(-10,10)+10*w/20
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
//because the image is a cube anyway...I think multiplying should give a better result than dividing...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i++){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j++){
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
//random-sample anti-aliasing, one jittered sample per pixel
thrust::default_random_engine rngx = makeSeededRandomEngine(i, id, 1);
thrust::default_random_engine rngy = makeSeededRandomEngine(j, id, 1);
thrust::uniform_real_distribution<float> u1(0, 1);
thrust::uniform_real_distribution<float> u2(0.5, 0.999);
//simple clip..
point =glm::vec2(i + u1(rngx), j + u1(rngy));
if (tri[0].x > w || tri[0].x < 0 || tri[0].y > h || tri[0].y < 0) continue;
if (isBarycentricCoordInBounds(baryc)){
//these three normals should be the same since they lie on the same face (checked)
int intdepth = (int)getZAtCoordinate(baryc, tri);
//atomicMin(int* address, int val)
//reads word old located at the address, computes the minimum of old and val,
//and stores the result back to memory at the same address. returns old
atomicMin(&dev_fmInput[i*w + j].dis, intdepth);
if (dev_fmInput[i*w + j].dis == intdepth){
dev_fmInput[i*w + j].color = dev_primitives[id].v[0].col;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
dev_fmInput[i*w + j].pos = (dev_primitives[id].v[0].pos + dev_primitives[id].v[1].pos + dev_primitives[id].v[2].pos) / 3.f;
}
}
}
}
}
}
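//The kernel above writes v[0]'s attributes for every covered pixel, which gives
//flat per-triangle shading. Below is a minimal sketch of smooth (affine) barycentric
//interpolation of a per-vertex attribute; the helper is illustrative and is not
//wired into the pipeline (perspective-correct interpolation would additionally weight by 1/w).
__device__ glm::vec3 interpolateAttributeSketch(glm::vec3 baryc, glm::vec3 a0, glm::vec3 a1, glm::vec3 a2)
{
	//inside the triangle the three weights sum to 1, so this is a convex combination
	return baryc.x * a0 + baryc.y * a1 + baryc.z * a2;
}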
__global__ void Tesselation(bool active, VertexOut *dev_vertin, Triangle *dev_triout, int vercount)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (active&&id < vercount / 3.f)
{
int tessel_number = 3;
glm::vec3 tri[3];
tri[0] = dev_vertin[3 * id].pos;
tri[1] = dev_vertin[3 * id + 1].pos;
tri[2] = dev_vertin[3 * id + 2].pos;
//default tessellation: generate 4 triangles automatically
glm::vec3 vnew[3];
vnew[0] = (tri[0] + tri[1]) / 2.f;
vnew[1] = (tri[0] + tri[2]) / 2.f;
vnew[2] = (tri[2] + tri[1]) / 2.f;
dev_triout[4 * id].v[0].pos = tri[0];
dev_triout[4 * id].v[1].pos = vnew[0];
dev_triout[4 * id].v[2].pos = vnew[1];
dev_triout[4 * id + 1].v[0].pos = vnew[0];
dev_triout[4 * id + 1].v[1].pos = tri[1];
dev_triout[4 * id + 1].v[2].pos = vnew[2];
dev_triout[4 * id + 2].v[0].pos = vnew[0];
dev_triout[4 * id + 2].v[1].pos = vnew[2];
dev_triout[4 * id + 2].v[2].pos = vnew[1];
dev_triout[4 * id + 3].v[0].pos = vnew[1];
dev_triout[4 * id + 3].v[1].pos = vnew[2];
dev_triout[4 * id + 3].v[2].pos = tri[2];
/*for (int i = 0; i < 4; i++){
for (int j = 0; j < 3; j++)
{
dev_triout[4 * id + i].v[j].nor = dev_vertin[3 * id].nor;
}
}*/
//to make the subdivision visible for checking, perturb each sub-triangle's normal a little
for (int i = 0; i < 3; i++){
{
dev_triout[4 * id].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0.3, 0, 0));
dev_triout[4 * id + 1].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0, 0.3, 0));
dev_triout[4 * id + 2].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0, 0, 0));
dev_triout[4 * id + 3].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0, 0, 0.3));
}
}
}
}
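//One pass of the midpoint subdivision above turns each triangle into 4, so k passes
//produce 4^k triangles per input triangle. A minimal host-side sizing sketch
//(illustrative only; this pipeline hard-codes a single pass and allocates
//dev_primitives for 4x the triangle count accordingly):
static inline int tessellatedTriangleCountSketch(int inputTriangles, int passes)
{
	int out = inputTriangles;
	for (int i = 0; i < passes; ++i) {
		out *= 4;	//1 -> 4 -> 16 -> ...
	}
	return out;
}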
/* scan_line:brute force
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += 1;
tri[i].y += 1;
tri[i].x *= w / 2.f;
tri[i].y *= h / 2.f;
}
for (int i = 0; i < w; i++){
for (int j = 0; j < h; j++){
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryc)){
dev_fmInput[i*w + j].color = glm::vec3(1, 0, 0);
}
}*/
glm::vec3 SetLight()
{
glm::vec3 light_pos = glm::vec3(2, 1, 2);
return light_pos;
}
//Blinn-Phong
/*__global__ void antialiansing(Triangle *dev_in,Triangle *dev_out,int trianglecount)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < trianglecount)
{
}
}*/
//input output depthbuffer
__global__ void fragmentShading(Fragment *dev_fmInput, Fragment *dev_fmOutput, int w, int h, glm::vec3 light_pos, glm::vec3 camera_pos, bool defaultbackground)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < w*h){
__syncthreads();
///glm::vec3 ccc = (dev_fmInput[id].subcolor[0] + dev_fmInput[id].subcolor[1] + dev_fmInput[id].subcolor[2] + dev_fmInput[id].subcolor[3]) / 4.f;
glm::vec3 ccc = dev_fmInput[id].color;
float specular_power = 100;
glm::vec3 specular_color = glm::vec3(1, 1, 1);//dev_fmInput[id].color;
glm::vec3 lightray = light_pos - dev_fmInput[id].pos;
glm::vec3 inray = camera_pos - dev_fmInput[id].pos;
glm::vec3 H = glm::normalize(inray) + glm::normalize(lightray);
H = glm::vec3(H.x / 2.0, H.y / 2.0, H.z / 2.0);
float hdot = glm::dot(H, dev_fmInput[id].normal);
float x = pow(hdot, specular_power);
if (x < 0)x = 0.f;
glm::vec3 spec = x*specular_color;
glm::vec3 Lambert = glm::vec3(1, 1, 1);
glm::vec3 Ambient = ccc;
float diffuse = glm::clamp(glm::dot(dev_fmInput[id].normal, glm::normalize(lightray)), 0.0f, 1.0f);
Lambert *= diffuse;
glm::vec3 phong_color = 0.5f*spec + 0.4f*Lambert + 0.1f*Ambient;//where is ambient light?
phong_color = glm::clamp(phong_color, 0.f, 1.f);
//dev_fmOutput[id].color = phong_color;
//blending
//DestinationColor.rgb = (SourceColor.rgb * One) + (DestinationColor.rgb * (1 - SourceColor.a));
if (Blending){
if (defaultbackground)
{
glm::vec3 background = glm::vec3(0, 0, 1);
float default_a = 0.8;
dev_fmOutput[id].color = phong_color + (background * (1 - default_a));
}
else
{
float depth = dev_fmInput[id].dis;
if (depth > 0) {
dev_fmOutput[id].color = glm::vec3(0.8, 0.8, 0.8);
}
else dev_fmOutput[id].color = (-depth)* phong_color + (1 + depth)*glm::vec3(0.8, 0.8, 0.8);
}
}
else dev_fmOutput[id].color = phong_color;
}
}
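//The shader above builds H by averaging the two unit vectors; the textbook
//Blinn-Phong half vector is the normalized sum, H = normalize(L + V), with
//specular = max(dot(N, H), 0)^shininess. A minimal sketch (illustrative, not
//called by the pipeline):
__device__ float blinnPhongSpecularSketch(glm::vec3 N, glm::vec3 L, glm::vec3 V, float shininess)
{
	glm::vec3 H = glm::normalize(glm::normalize(L) + glm::normalize(V));
	float nDotH = glm::clamp(glm::dot(N, H), 0.f, 1.f);
	return powf(nDotH, shininess);
}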
/*
* Perform rasterization.
*/
void RotateAboutRight(float deg, glm::vec3 &ref, const glm::vec3 right, const glm::vec3 eye)
{
deg *= DEG2RAD;
glm::mat4 rotation = glm::rotate(glm::mat4(1.0f), deg, right);
ref = ref - eye;
ref = glm::vec3(rotation * glm::vec4(ref, 1));
ref = ref + eye;
}
void TranslateAlongRight(float amt, glm::vec3 &ref, const glm::vec3 right, glm::vec3 &eye)
{
glm::vec3 translation = right * amt;
eye += translation;
ref += translation;
}
void RotateAboutUp(float deg, glm::vec3 &ref, const glm::vec3 right, const glm::vec3 eye, const glm::vec3 up)
{
deg *= DEG2RAD;
glm::mat4 rotation = glm::rotate(glm::mat4(1.0f), deg, up);
ref = ref - eye;
ref = glm::vec3(rotation * glm::vec4(ref, 1));
ref = ref + eye;
}
void TranslateAlongLook(float amt, const glm::vec3 look, glm::vec3 &eye, glm::vec3 & ref)
{
glm::vec3 translation = look * amt;
eye += translation;
ref += translation;
}
void TranslateAlongUp(float amt, glm::vec3 &eye, glm::vec3 & ref, const glm::vec3 up)
{
glm::vec3 translation = up * amt;
eye += translation;
ref += translation;
}
glm::mat4 camera(float x_trans_amount, float y_trans_amount, float up_angle_amount, float right_angle_amount, glm::vec3 &camerapos)
{
glm::vec3 eye = glm::vec3(3, 0, 3);
glm::vec3 up = glm::vec3(0, 1, 0);
glm::vec3 ref = glm::vec3(0, 0, 0);
camerapos = eye;
float near_clip = 1.0f;
float far_clip = 1000.f;
float width = 800;
float height = 800;
float aspect = (float)width / (float)height;
float fovy = 45.f;
glm::vec3 world_up = glm::vec3(0, 1, 0);
glm::vec3 look = glm::normalize(ref - eye);
glm::vec3 right = glm::normalize(glm::cross(look, world_up));
RotateAboutRight(right_angle_amount, ref, right, eye);
RotateAboutUp(up_angle_amount, ref, right, eye, up);
TranslateAlongRight(x_trans_amount, ref, right, eye);
TranslateAlongUp(y_trans_amount, eye, ref, up);
glm::mat4 viewMatrix = glm::lookAt(eye, ref, up);
glm::mat4 projectionMatrix = glm::perspective(fovy, aspect, near_clip, far_clip);//fovy,aspect, zNear, zFar;
glm::mat4 getViewProj = projectionMatrix*viewMatrix;
return getViewProj;
}
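//A minimal usage sketch for the matrix returned above (the helper name is
//illustrative and nothing calls it): take a model-space point to normalized
//device coordinates, i.e. clip = ViewProj * vec4(p, 1) followed by the divide by w.
static glm::vec3 toNdcSketch(const glm::mat4 &viewProj, const glm::vec3 &p)
{
	glm::vec4 clip = viewProj * glm::vec4(p, 1.f);
	return glm::vec3(clip) / clip.w;	//perspective divide
}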
void rasterize(uchar4 *pbo, float amt_x, float amt_y, float up_a, float right_a)
{
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
//key_test:
//std::cout << "ss " << amt_x << "and " << amt_y << std::endl;
//std::cout << "dd" << up_a << "and " << right_a << std::endl;
//step1.vertex shading
int blockSize1d = 256;
int blockCount1d = (vertCount + blockSize1d - 1) / blockSize1d;
int image_blockSize1d = 256;
int image_blockCount1d = (width*height + image_blockSize1d - 1) / image_blockSize1d;
glm::vec3 camera_pos = glm::vec3(0);
glm::vec3 light_pos = SetLight();
glm::mat4 getViewProj = camera(amt_x,amt_y, up_a,right_a, camera_pos);
//glm::mat4 getViewProj = glm::mat4(1);
//clean depth buffer
cleanDepth << < image_blockCount1d, image_blockSize1d >> >(dev_depthbuffer, dev_fmInput, width, height);
checkCUDAError("clean");
vertexShader << <blockCount1d, blockSize1d >> >(dev_bufVertex, dev_vsOutput, vertCount, getViewProj);
checkCUDAError("vertexShader");
//step2.primitive assembly
int blockCount1d_tri;
bool tesselation = Tess;
if (!tesselation)
{
//vertexnumber: vertcount,triangle number:vertcount/3.0
blockCount1d_tri = blockCount1d / 3 + 1;
PrimitiveAssembly << < blockCount1d_tri, blockSize1d >> >(dev_vsOutput, dev_primitives, vertCount);
checkCUDAError("PrimitiveAssembly");
rasterization << < blockCount1d_tri, blockSize1d >> >(dev_primitives, dev_fmInput, vertCount, width, height, N);
checkCUDAError("rasterization");
}
else
{
blockCount1d_tri = blockCount1d / 3 * 4 + 1;
//vertex number: vertCount*4, triangle number: vertCount*4/3
Tesselation << <blockCount1d_tri, blockSize1d >> >(1, dev_vsOutput, dev_primitives, vertCount);
checkCUDAError("Tesselation");
rasterization << < blockCount1d_tri, blockSize1d >> >(dev_primitives, dev_fmInput, vertCount * 4, width, height, N);
checkCUDAError("rasterization");
}
//Blinn-Phong + blending
fragmentShading << <image_blockCount1d, image_blockSize1d >> >(dev_fmInput, dev_depthbuffer, width, height, light_pos, camera_pos, 1);
checkCUDAError("shading");
//blending << <image_blockCount1d, image_blockSize1d >> >(dev_fmOutput, dev_depthbuffer, N, 1);
checkCUDAError("blending");
render << <blockCount2d, blockSize2d >> >(width, height, dev_depthbuffer, dev_framebuffer);
sendImageToPBO << <blockCount2d, blockSize2d >> >(pbo, width, height, dev_framebuffer);
checkCUDAError("sendToPBO");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
hipFree(dev_bufIdx);
dev_bufIdx = NULL;
hipFree(dev_bufVertex);
dev_bufVertex = NULL;
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_vsOutput);
dev_vsOutput = NULL;
hipFree(dev_fmInput);
dev_fmInput = NULL;
hipFree(dev_depthbuffer);
dev_depthbuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
checkCUDAError("rasterizeFree");
}
| 602844bf3a4ceaa8d6dcf9030a52e2f03c26dd7e.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
//<seqan / parallel.h>
#include <thrust/random.h>
#include <cmath>
#include <vector>
#include <cstdio>
#include <cuda.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#define DEG2RAD PI/180.f
#define Tess 0
#define Blending 0
struct VertexIn {
glm::vec3 pos;
glm::vec3 nor;
glm::vec3 col;
// TODO (optional) add other vertex attributes (e.g. texture coordinates)
};
struct VertexOut {
// TODO
glm::vec3 pos;
glm::vec3 nor;
glm::vec3 col;
};
struct Triangle {
VertexOut v[3];
};
struct Fragment {
int dis;
glm::vec3 color;
glm::vec3 normal;
glm::vec3 pos;
glm::vec3 subcolor[4];
int subdis[4];
};
int N = 0;
int M = 0;
int mat = 0;
int dev = 0;
static int width = 0;
static int height = 0;
static int *dev_bufIdx = NULL;
static VertexIn *dev_bufVertex = NULL;
static VertexOut *dev_vsOutput = NULL;
static Triangle *dev_primitives = NULL;
static Fragment *dev_depthbuffer = NULL;
static Fragment *dev_fmInput = NULL;
static Fragment *dev_fmOutput = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
__host__ __device__ inline unsigned int utilhash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
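// A minimal sketch (illustrative, unused) of how the engine above is consumed
// later in this file: draw one jittered sub-pixel offset in [0,1) x [0,1).
__host__ __device__ static glm::vec2 jitteredOffsetSketch(int iter, int index)
{
	thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 1);
	thrust::uniform_real_distribution<float> u01(0, 1);
	return glm::vec2(u01(rng), u01(rng));
}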
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
__global__ void cleanDepth(Fragment* dev_depthbuffer, Fragment* dev_fmInput, int w, int h)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
//float t = INFINITY;
if (x < w && y < h)
{
dev_depthbuffer[index].color = glm::vec3(1, 0, 0);
dev_depthbuffer[index].dis = INFINITY;
dev_depthbuffer[index].normal = glm::vec3(0, 1, 0);
dev_fmInput[index].normal = glm::vec3(0, 1, 0);
dev_fmInput[index].dis = INFINITY;
dev_fmInput[index].color = glm::vec3(1, 1, 1);
dev_fmInput[index].normal = glm::vec3(0, 1, 0);
}
}
// Writes fragment colors to the framebuffer
__global__ void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
framebuffer[index] = depthbuffer[index].color;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_depthbuffer);
cudaMalloc(&dev_depthbuffer, width * height * sizeof(Fragment));
cudaMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_fmInput);
cudaMalloc(&dev_fmInput, 4 * width * height * sizeof(Fragment));
cudaMemset(dev_fmInput, 0, 4 * width * height * sizeof(Fragment));
cudaFree(dev_fmOutput);
cudaMalloc(&dev_fmOutput, width * height * sizeof(Fragment));
cudaMemset(dev_fmOutput, 0, width * height * sizeof(Fragment));
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
void rasterizeSetBuffers(
int _bufIdxSize, int *bufIdx,
int _vertCount, float *bufPos, float *bufNor, float *bufCol, bool resselation) {
//********************
resselation = Tess;
//********************
bufIdxSize = _bufIdxSize;
vertCount = _vertCount;
cudaFree(dev_bufIdx);
cudaMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
cudaMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), cudaMemcpyHostToDevice);
VertexIn *bufVertex = new VertexIn[_vertCount];
float maxv = -1.f;
for (int i = 0; i < vertCount; i++) {
int j = i * 3;
bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
//***********check here....*******//
float temp = std::max(bufVertex[i].pos.x, std::max(bufVertex[i].pos.y, bufVertex[i].pos.y));
if (temp>maxv){ maxv = temp; }
}
N = (int)maxv + 1;
cudaFree(dev_bufVertex);
cudaMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
cudaMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), cudaMemcpyHostToDevice);
cudaFree(dev_vsOutput);
cudaMalloc(&dev_vsOutput, vertCount * sizeof(VertexOut));
if (!resselation)
{
cudaFree(dev_primitives);
cudaMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle));
cudaMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle));
checkCUDAError("rasterizeSetBuffers");
}
else
{
cudaFree(dev_primitives);
cudaMalloc(&dev_primitives, vertCount / 3 * 4 * sizeof(Triangle));
cudaMemset(dev_primitives, 0, vertCount / 3 * 4 * sizeof(Triangle));
checkCUDAError("rasterizeSetBuffers");
}
}
__global__ void vertexShader(VertexIn *dev_bufVertex, VertexOut *dev_vsOutput, int vertexCount, glm::mat4 ViewProj){
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexCount){
//simple orthographic projection (disabled)
//dev_vsOutput[id].pos = dev_bufVertex[id].pos;
//dev_vsOutput[id].nor = dev_bufVertex[id].nor;
dev_vsOutput[id].pos = multiplyMV(ViewProj, glm::vec4(dev_bufVertex[id].pos, 1));
dev_vsOutput[id].nor = multiplyMV(ViewProj, glm::vec4(dev_bufVertex[id].nor, 0));
dev_vsOutput[id].nor = glm::normalize(dev_vsOutput[id].nor);
dev_vsOutput[id].col = glm::vec3(1, 0, 0);
//dev_vsOutput[id].col = dev_bufVertex[id].col;
//interpolate the normal:smooth normal color??
}
}
__global__ void PrimitiveAssembly(VertexOut *dev_vsOutput, Triangle * dev_primitives, int verCount)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < verCount / 3){
dev_primitives[id].v[0].pos = dev_vsOutput[3 * id].pos;//012,345,678
dev_primitives[id].v[1].pos = dev_vsOutput[3 * id + 1].pos;
dev_primitives[id].v[2].pos = dev_vsOutput[3 * id + 2].pos;
dev_primitives[id].v[0].nor = dev_vsOutput[3 * id].nor;//012,345,678
dev_primitives[id].v[1].nor = dev_vsOutput[3 * id + 1].nor;
dev_primitives[id].v[2].nor = dev_vsOutput[3 * id + 2].nor;
dev_primitives[id].v[0].col = dev_vsOutput[3 * id].col;//012,345,678
dev_primitives[id].v[1].col = dev_vsOutput[3 * id + 1].col;
dev_primitives[id].v[2].col = dev_vsOutput[3 * id + 2].col;
}
}
__host__ __device__ bool fequal(float a, float b){
if (a > b - 0.000001&&a < b + 0.000001){ return true; }
else return false;
}
__device__ int _atomicMin(int *addr, int val)
{
int old = *addr, assumed;
if (old <= val) return old;
do{
assumed = old;
old = atomicCAS(addr, assumed, val);
} while (old != assumed);
return old;
}
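// _atomicMin above is the generic compare-and-swap pattern (CUDA already provides
// atomicMin(int*, int), which the active rasterization kernel uses). The same
// pattern extended to float, re-checking the current value inside the loop so a
// competing smaller value is never overwritten; illustrative only, nothing in
// this file calls it.
__device__ static float atomicMinFloatSketch(float *addr, float value)
{
	int *addrAsInt = (int *)addr;
	int old = *addrAsInt, assumed;
	do {
		assumed = old;
		if (__int_as_float(assumed) <= value) break;	// stored value already small enough
		old = atomicCAS(addrAsInt, assumed, __float_as_int(value));
	} while (old != assumed);
	return __int_as_float(old);
}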
/*{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
//because the image is cube anyway...I think multiply should have better result than devide...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i += 0.5){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j += 0.5){
if (tri[0].x > w || tri[0].x < 0 || tri[0].y>h || tri[0].x < 0)
{
//color[i*w + j].color = glm::vec3(0, 0, 0);//black
} //anti-aliansing..multisampling the patern 4 sample every pixel
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryc))
{
int intdepth = getZAtCoordinate(baryc, tri);
int dis;
_atomicMin(&dis, intdepth);
if (intdepth == dis){
dev_fmInput[i*w + j].subcolor[k] = dev_primitives[id].v[0].nor;
}
dev_fmInput[i*w + j].pos = dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
}
}
}
//else //pixel have more than 1 color
//{
/* glm::vec3 baryc_p[4];
int intdepth_s[4];
for (int p = 0; p < 4; p++)
{
baryc_p[p] = calculateBarycentricCoordinate(tri, random_point[p]);
if (isBarycentricCoordInBounds(baryc_p[p])){
intdepth_s[p] = getZAtCoordinate(baryc_p[p], tri);
_atomicMin(&dev_fmInput[i*w + j].subdis[p], intdepth_s[p]);
if (intdepth_s[p] == dev_fmInput[i*w + j].subdis[p]){
dev_fmInput[i*w + j].subcolor[p] = dev_primitives[id].v[0].nor;;
}
}
}
dev_fmInput[i*w + j].pos = dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
// }
}
}
}
}
//dev_primitives, dev_fmInput*4, dev_fmOutput
/*__global__ void rasterization(Triangle * dev_primitives, Fragment *dev_fmInput, int vertexcount, int w, int h, int N)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
//potimized boundingbox;
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){//(-1,1)+1*w/2
//(-10,10)+10*w/20
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
//because the image is cube anyway...I think multiply should have better result than devide...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i++){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j++){
if (tri[0].x > w || tri[0].x < 0 || tri[0].y>h || tri[0].x < 0)
{
//color[i*w + j].color = glm::vec3(0, 0, 0);//black
} //anti-aliansing..multisampling the patern 4 sample every pixel
glm::vec2 point(i + 0.5, j + 0.5);
thrust::default_random_engine rngx = makeSeededRandomEngine(i, id, 1);
thrust::default_random_engine rngy = makeSeededRandomEngine(j, id, 1);
thrust::uniform_real_distribution<float> u1(0, 0.5);
thrust::uniform_real_distribution<float> u2(0.5, 0.999);
glm::vec2 random_point[4];
int number = 0;
//random_point[0].x = i + u1(rngx);//-1,1
//random_point[0].y = j + u1(rngy);
//random_point[1].x = i + u2(rngx);//-1,-1
//random_point[1].y = j + u1(rngy);
//random_point[2].x = i + u1(rngx);//1,1
//random_point[2].y = j + u2(rngy);
//random_point[3].x = i + u2(rngx);//i+0+0.22,i+
//random_point[3].y = j + u2(rngy);
random_point[0].x = i + 0.25;//-1,1
random_point[0].y = j + 0.25;
random_point[1].x = i + 0.25;//-1,-1
random_point[1].y = j + 0.75;
random_point[2].x = i + 0.75;//1,1
random_point[2].y = j + 0.25;
random_point[3].x = i + 0.75;//i+0+0.22,i+
random_point[3].y = j + 0.75;
for (int t = 0; t < 4;t++){
glm::vec3 baryc_sub = calculateBarycentricCoordinate(tri, random_point[t]);
if (isBarycentricCoordInBounds(baryc_sub))
{
number++;
}
}
/* if (number == 4)//all in
{
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryc)){
int intdepth = getZAtCoordinate(baryc, tri);
_atomicMin(&(dev_fmInput[i*w + j].dis), intdepth);
if (intdepth == dev_fmInput[i*w + j].dis){
for (int k = 0; k < 4; k++){
dev_fmInput[i*w + j].subcolor[k] = dev_primitives[id].v[0].nor;
}
dev_fmInput[i*w + j].pos= dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
}
}
}*/
//else //pixel have more than 1 color
//{
/* glm::vec3 baryc_p[4];
int intdepth_s[4];
for (int p = 0; p < 4; p++)
{
baryc_p[p] = calculateBarycentricCoordinate(tri, random_point[p]);
if (isBarycentricCoordInBounds(baryc_p[p])){
intdepth_s[p] = getZAtCoordinate(baryc_p[p], tri);
_atomicMin(&dev_fmInput[i*w + j].subdis[p], intdepth_s[p]);
if (intdepth_s[p] == dev_fmInput[i*w + j].subdis[p]){
dev_fmInput[i*w + j].subcolor[p] = dev_primitives[id].v[0].nor;;
}
}
}
dev_fmInput[i*w + j].pos = dev_primitives[id].v[0].pos;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
// }
}
}
}
}*/
/*__global__ void rasterization(Triangle * dev_primitives, Fragment *dev_fmInput, int vertexcount, int w, int h, int N)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
//potimized boundingbox;
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){//(-1,1)+1*w/2
//(-10,10)+10*w/20
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
//because the image is cube anyway...I think multiply should have better result than devide...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i++){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j++){
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
//simple clip..
if (tri[0].x > w || tri[0].x < 0 || tri[0].y>h || tri[0].x < 0)continue;
if (isBarycentricCoordInBounds(baryc)){
//these three normal should be the same since they are on the same face (checked)
int intdepth = (int)getZAtCoordinate(baryc, tri);
//atomicMin(int* address, int val)
//reads word old located at the address, computes the minimum of old and val,
//and stores the result back to memory at the same address. returns old
atomicMin(&dev_fmInput[i*w + j].dis, intdepth);
if (dev_fmInput[i*w + j].dis == intdepth){
dev_fmInput[i*w + j].color = dev_primitives[id].v[0].col;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
dev_fmInput[i*w + j].pos = (dev_primitives[id].v[0].pos + dev_primitives[id].v[1].pos + dev_primitives[id].v[2].pos) / 3.f;
}
}
}
}
}
}*/
__global__ void rasterization(Triangle * dev_primitives, Fragment *dev_fmInput, int vertexcount, int w, int h, int N)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < vertexcount / 3.f)
{
//optimized bounding box;
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){//(-1,1)+1*w/2
//(-10,10)+10*w/20
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += N;
tri[i].y += N;
tri[i].z += N;
tri[i].x *= w / (float)(2.f*N);
tri[i].y *= h / (float)(2.f*N);
tri[i].z *= w / (float)(2.f*N);
//because the image is a cube anyway...I think multiplying should give a better result than dividing...
}
AABB aabb;
aabb = getAABBForTriangle(tri);
for (int i = aabb.min.x - 1; i < aabb.max.x + 1; i++){
for (int j = aabb.min.y - 1; j < aabb.max.y + 1; j++){
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
//random-sample anti-aliasing, one jittered sample per pixel
thrust::default_random_engine rngx = makeSeededRandomEngine(i, id, 1);
thrust::default_random_engine rngy = makeSeededRandomEngine(j, id, 1);
thrust::uniform_real_distribution<float> u1(0, 1);
thrust::uniform_real_distribution<float> u2(0.5, 0.999);
//simple clip..
point =glm::vec2(i + u1(rngx), j + u1(rngy));
if (tri[0].x > w || tri[0].x < 0 || tri[0].y > h || tri[0].y < 0) continue;
if (isBarycentricCoordInBounds(baryc)){
//these three normals should be the same since they lie on the same face (checked)
int intdepth = (int)getZAtCoordinate(baryc, tri);
//atomicMin(int* address, int val)
//reads word old located at the address, computes the minimum of old and val,
//and stores the result back to memory at the same address. returns old
atomicMin(&dev_fmInput[i*w + j].dis, intdepth);
if (dev_fmInput[i*w + j].dis == intdepth){
dev_fmInput[i*w + j].color = dev_primitives[id].v[0].col;
dev_fmInput[i*w + j].normal = dev_primitives[id].v[0].nor;
dev_fmInput[i*w + j].pos = (dev_primitives[id].v[0].pos + dev_primitives[id].v[1].pos + dev_primitives[id].v[2].pos) / 3.f;
}
}
}
}
}
}
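// The loops above scan aabb.min-1 .. aabb.max+1 and the clip test only checks
// tri[0], so a partially visible triangle can still index dev_fmInput out of
// bounds. A minimal clamping sketch (illustrative; the helper is not called here):
__device__ static void clampScanBoundsSketch(const AABB &aabb, int w, int h,
		int &xMin, int &xMax, int &yMin, int &yMax)
{
	xMin = max((int)aabb.min.x - 1, 0);
	xMax = min((int)aabb.max.x + 1, w - 1);
	yMin = max((int)aabb.min.y - 1, 0);
	yMax = min((int)aabb.max.y + 1, h - 1);
}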
__global__ void Tesselation(bool active, VertexOut *dev_vertin, Triangle *dev_triout, int vercount)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (active&&id < vercount / 3.f)
{
int tessel_number = 3;
glm::vec3 tri[3];
tri[0] = dev_vertin[3 * id].pos;
tri[1] = dev_vertin[3 * id + 1].pos;
tri[2] = dev_vertin[3 * id + 2].pos;
//default tessellation: generate 4 triangles automatically
glm::vec3 vnew[3];
vnew[0] = (tri[0] + tri[1]) / 2.f;
vnew[1] = (tri[0] + tri[2]) / 2.f;
vnew[2] = (tri[2] + tri[1]) / 2.f;
dev_triout[4 * id].v[0].pos = tri[0];
dev_triout[4 * id].v[1].pos = vnew[0];
dev_triout[4 * id].v[2].pos = vnew[1];
dev_triout[4 * id + 1].v[0].pos = vnew[0];
dev_triout[4 * id + 1].v[1].pos = tri[1];
dev_triout[4 * id + 1].v[2].pos = vnew[2];
dev_triout[4 * id + 2].v[0].pos = vnew[0];
dev_triout[4 * id + 2].v[1].pos = vnew[2];
dev_triout[4 * id + 2].v[2].pos = vnew[1];
dev_triout[4 * id + 3].v[0].pos = vnew[1];
dev_triout[4 * id + 3].v[1].pos = vnew[2];
dev_triout[4 * id + 3].v[2].pos = tri[2];
/*for (int i = 0; i < 4; i++){
for (int j = 0; j < 3; j++)
{
dev_triout[4 * id + i].v[j].nor = dev_vertin[3 * id].nor;
}
}*/
//to make the subdivision visible for checking, perturb each sub-triangle's normal a little
for (int i = 0; i < 3; i++){
{
dev_triout[4 * id].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0.3, 0, 0));
dev_triout[4 * id + 1].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0, 0.3, 0));
dev_triout[4 * id + 2].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0, 0, 0));
dev_triout[4 * id + 3].v[i].nor = glm::normalize(dev_vertin[3 * id].nor + glm::vec3(0, 0, 0.3));
}
}
}
}
/* scan_line:brute force
glm::vec3 tri[3];
for (int i = 0; i < 3; i++){
tri[i] = dev_primitives[id].v[i].pos;
tri[i].x += 1;
tri[i].y += 1;
tri[i].x *= w / 2.f;
tri[i].y *= h / 2.f;
}
for (int i = 0; i < w; i++){
for (int j = 0; j < h; j++){
glm::vec2 point(i, j);
glm::vec3 baryc = calculateBarycentricCoordinate(tri, point);
if (isBarycentricCoordInBounds(baryc)){
dev_fmInput[i*w + j].color = glm::vec3(1, 0, 0);
}
}*/
glm::vec3 SetLight()
{
glm::vec3 light_pos = glm::vec3(2, 1, 2);
return light_pos;
}
//Blinn-Phong
/*__global__ void antialiansing(Triangle *dev_in,Triangle *dev_out,int trianglecount)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < trianglecount)
{
}
}*/
//input output depthbuffer
__global__ void fragmentShading(Fragment *dev_fmInput, Fragment *dev_fmOutput, int w, int h, glm::vec3 light_pos, glm::vec3 camera_pos, bool defaultbackground)
{
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id < w*h){
__syncthreads();
///glm::vec3 ccc = (dev_fmInput[id].subcolor[0] + dev_fmInput[id].subcolor[1] + dev_fmInput[id].subcolor[2] + dev_fmInput[id].subcolor[3]) / 4.f;
glm::vec3 ccc = dev_fmInput[id].color;
float specular_power = 100;
glm::vec3 specular_color = glm::vec3(1, 1, 1);//dev_fmInput[id].color;
glm::vec3 lightray = light_pos - dev_fmInput[id].pos;
glm::vec3 inray = camera_pos - dev_fmInput[id].pos;
glm::vec3 H = glm::normalize(inray) + glm::normalize(lightray);
H = glm::vec3(H.x / 2.0, H.y / 2.0, H.z / 2.0);
float hdot = glm::dot(H, dev_fmInput[id].normal);
float x = pow(hdot, specular_power);
if (x < 0)x = 0.f;
glm::vec3 spec = x*specular_color;
glm::vec3 Lambert = glm::vec3(1, 1, 1);
glm::vec3 Ambient = ccc;
float diffuse = glm::clamp(glm::dot(dev_fmInput[id].normal, glm::normalize(lightray)), 0.0f, 1.0f);
Lambert *= diffuse;
glm::vec3 phong_color = 0.5f*spec + 0.4f*Lambert + 0.1f*Ambient;//where is ambient light?
phong_color = glm::clamp(phong_color, 0.f, 1.f);
//dev_fmOutput[id].color = phong_color;
//blending
//DestinationColor.rgb = (SourceColor.rgb * One) + (DestinationColor.rgb * (1 - SourceColor.a));
if (Blending){
if (defaultbackground)
{
glm::vec3 background = glm::vec3(0, 0, 1);
float default_a = 0.8;
dev_fmOutput[id].color = phong_color + (background * (1 - default_a));
}
else
{
float depth = dev_fmInput[id].dis;
if (depth > 0) {
dev_fmOutput[id].color = glm::vec3(0.8, 0.8, 0.8);
}
else dev_fmOutput[id].color = (-depth)* phong_color + (1 + depth)*glm::vec3(0.8, 0.8, 0.8);
}
}
else dev_fmOutput[id].color = phong_color;
}
}
/*
* Perform rasterization.
*/
void RotateAboutRight(float deg, glm::vec3 &ref, const glm::vec3 right, const glm::vec3 eye)
{
deg *= DEG2RAD;
glm::mat4 rotation = glm::rotate(glm::mat4(1.0f), deg, right);
ref = ref - eye;
ref = glm::vec3(rotation * glm::vec4(ref, 1));
ref = ref + eye;
}
void TranslateAlongRight(float amt, glm::vec3 &ref, const glm::vec3 right, glm::vec3 &eye)
{
glm::vec3 translation = right * amt;
eye += translation;
ref += translation;
}
void RotateAboutUp(float deg, glm::vec3 &ref, const glm::vec3 right, const glm::vec3 eye, const glm::vec3 up)
{
deg *= DEG2RAD;
glm::mat4 rotation = glm::rotate(glm::mat4(1.0f), deg, up);
ref = ref - eye;
ref = glm::vec3(rotation * glm::vec4(ref, 1));
ref = ref + eye;
}
void TranslateAlongLook(float amt, const glm::vec3 look, glm::vec3 &eye, glm::vec3 & ref)
{
glm::vec3 translation = look * amt;
eye += translation;
ref += translation;
}
void TranslateAlongUp(float amt, glm::vec3 &eye, glm::vec3 & ref, const glm::vec3 up)
{
glm::vec3 translation = up * amt;
eye += translation;
ref += translation;
}
glm::mat4 camera(float x_trans_amount, float y_trans_amount, float up_angle_amount, float right_angle_amount, glm::vec3 &camerapos)
{
glm::vec3 eye = glm::vec3(3, 0, 3);
glm::vec3 up = glm::vec3(0, 1, 0);
glm::vec3 ref = glm::vec3(0, 0, 0);
camerapos = eye;
float near_clip = 1.0f;
float far_clip = 1000.f;
float width = 800;
float height = 800;
float aspect = (float)width / (float)height;
float fovy = 45.f;
glm::vec3 world_up = glm::vec3(0, 1, 0);
glm::vec3 look = glm::normalize(ref - eye);
glm::vec3 right = glm::normalize(glm::cross(look, world_up));
RotateAboutRight(right_angle_amount, ref, right, eye);
RotateAboutUp(up_angle_amount, ref, right, eye, up);
TranslateAlongRight(x_trans_amount, ref, right, eye);
TranslateAlongUp(y_trans_amount, eye, ref, up);
glm::mat4 viewMatrix = glm::lookAt(eye, ref, up);
glm::mat4 projectionMatrix = glm::perspective(fovy, aspect, near_clip, far_clip);//fovy,aspect, zNear, zFar;
glm::mat4 getViewProj = projectionMatrix*viewMatrix;
return getViewProj;
}
void rasterize(uchar4 *pbo, float amt_x, float amt_y, float up_a, float right_a)
{
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
//key_test:
//std::cout << "ss " << amt_x << "and " << amt_y << std::endl;
//std::cout << "dd" << up_a << "and " << right_a << std::endl;
//step1.vertex shading
int blockSize1d = 256;
int blockCount1d = (vertCount + blockSize1d - 1) / blockSize1d;
int image_blockSize1d = 256;
int image_blockCount1d = (width*height + image_blockSize1d - 1) / image_blockSize1d;
glm::vec3 camera_pos = glm::vec3(0);
glm::vec3 light_pos = SetLight();
glm::mat4 getViewProj = camera(amt_x,amt_y, up_a,right_a, camera_pos);
//glm::mat4 getViewProj = glm::mat4(1);
//clean depth buffer
cleanDepth << < image_blockCount1d, image_blockSize1d >> >(dev_depthbuffer, dev_fmInput, width, height);
checkCUDAError("clean");
vertexShader << <blockCount1d, blockSize1d >> >(dev_bufVertex, dev_vsOutput, vertCount, getViewProj);
checkCUDAError("vertexShader");
//step2.primitive assembly
int blockCount1d_tri;
bool tesselation = Tess;
if (!tesselation)
{
//vertexnumber: vertcount,triangle number:vertcount/3.0
blockCount1d_tri = blockCount1d / 3 + 1;
PrimitiveAssembly << < blockCount1d_tri, blockSize1d >> >(dev_vsOutput, dev_primitives, vertCount);
checkCUDAError("PrimitiveAssembly");
rasterization << < blockCount1d_tri, blockSize1d >> >(dev_primitives, dev_fmInput, vertCount, width, height, N);
checkCUDAError("rasterization");
}
else
{
blockCount1d_tri = blockCount1d / 3 * 4 + 1;
//vertex number: vertCount*4, triangle number: vertCount*4/3
Tesselation << <blockCount1d_tri, blockSize1d >> >(1, dev_vsOutput, dev_primitives, vertCount);
checkCUDAError("Tesselation");
rasterization << < blockCount1d_tri, blockSize1d >> >(dev_primitives, dev_fmInput, vertCount * 4, width, height, N);
checkCUDAError("rasterization");
}
//Blinn-Phong + blending
fragmentShading << <image_blockCount1d, image_blockSize1d >> >(dev_fmInput, dev_depthbuffer, width, height, light_pos, camera_pos, 1);
checkCUDAError("shading");
//blending << <image_blockCount1d, image_blockSize1d >> >(dev_fmOutput, dev_depthbuffer, N, 1);
checkCUDAError("blending");
render << <blockCount2d, blockSize2d >> >(width, height, dev_depthbuffer, dev_framebuffer);
sendImageToPBO << <blockCount2d, blockSize2d >> >(pbo, width, height, dev_framebuffer);
checkCUDAError("sendToPBO");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
cudaFree(dev_bufIdx);
dev_bufIdx = NULL;
cudaFree(dev_bufVertex);
dev_bufVertex = NULL;
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_vsOutput);
dev_vsOutput = NULL;
cudaFree(dev_fmInput);
dev_fmInput = NULL;
cudaFree(dev_depthbuffer);
dev_depthbuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
checkCUDAError("rasterizeFree");
}
|
9c5982ed11b3ba9c94fcdb9cdee3ab029b28f2f6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define POLYBENCH_TIME 1
#include "atax.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
#ifndef M_PI
#define M_PI 3.14159
#endif
#define RUN_ON_CPU
void init_array(int nx, int ny, DATA_TYPE POLYBENCH_1D(x,NX,nx), DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny))
{
int i, j;
for (i = 0; i < nx; i++)
{
x[i] = i * M_PI;
for (j = 0; j < ny; j++)
{
A[i][j] = ((DATA_TYPE) i*j) / NX;
}
}
}
void compareResults(int ny, DATA_TYPE POLYBENCH_1D(z,NY,ny), DATA_TYPE POLYBENCH_1D(z_outputFromGpu,NY,ny))
{
int i, fail;
fail = 0;
for (i=0; i<ny; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(int nx, int ny, DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < _PB_NX)
{
tmp[i] = 0;
int j;
for(j=0; j < _PB_NY; j++)
{
tmp[i] += A[i*NY+j] * x[j];
}
}
}
__global__ void atax_kernel2(int nx, int ny, DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < _PB_NY)
{
y[j] = 0;
int i;
for(i=0; i < _PB_NX; i++)
{
y[j] += A[i*NY+j] * tmp[i];
}
}
}
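/* Worked form of what the two kernels above compute (a restatement of the code):
 *   atax_kernel1:  tmp[i] = sum_j A[i][j] * x[j]     i.e.  tmp = A x
 *   atax_kernel2:  y[j]   = sum_i A[i][j] * tmp[i]   i.e.  y   = A^T tmp
 * Together: y = A^T (A x), the ATAX kernel, matching atax_cpu below. */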
void atax_cpu(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i,j;
for (i= 0; i < _PB_NY; i++)
{
y[i] = 0;
}
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
{
tmp[i] = tmp[i] + A[i][j] * x[j];
}
for (j = 0; j < _PB_NY; j++)
{
y[j] = y[j] + A[i][j] * tmp[i];
}
}
}
void ataxGpu(int nx, int ny, DATA_TYPE POLYBENCH_2D(A, NX, NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NX,nx), DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx), DATA_TYPE POLYBENCH_1D(y_outputFromGpu,NY,ny))
{
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
/* Start timer. */
polybench_start_instruments;
hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block) , 0, 0, nx, ny, A_gpu,x_gpu,tmp_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), dim3(block) , 0, 0, nx, ny, A_gpu,y_gpu,tmp_gpu);
hipDeviceSynchronize();
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(tmp_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main(int argc, char** argv)
{
int nx = NX;
int ny = NY;
POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,NX,NY,nx,ny);
POLYBENCH_1D_ARRAY_DECL(x,DATA_TYPE,NY,ny);
POLYBENCH_1D_ARRAY_DECL(y,DATA_TYPE,NY,ny);
POLYBENCH_1D_ARRAY_DECL(y_outputFromGpu,DATA_TYPE,NY,ny);
POLYBENCH_1D_ARRAY_DECL(tmp,DATA_TYPE,NX,nx);
init_array(nx, ny, POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(A));
GPU_argv_init();
ataxGpu(nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp),
POLYBENCH_ARRAY(y_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
atax_cpu(nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(ny, POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(y_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(ny, POLYBENCH_ARRAY(y_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(y_outputFromGpu);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
#include <polybench.c> | 9c5982ed11b3ba9c94fcdb9cdee3ab029b28f2f6.cu | /**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#define POLYBENCH_TIME 1
#include "atax.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
#ifndef M_PI
#define M_PI 3.14159
#endif
#define RUN_ON_CPU
void init_array(int nx, int ny, DATA_TYPE POLYBENCH_1D(x,NX,nx), DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny))
{
int i, j;
for (i = 0; i < nx; i++)
{
x[i] = i * M_PI;
for (j = 0; j < ny; j++)
{
A[i][j] = ((DATA_TYPE) i*j) / NX;
}
}
}
void compareResults(int ny, DATA_TYPE POLYBENCH_1D(z,NY,ny), DATA_TYPE POLYBENCH_1D(z_outputFromGpu,NY,ny))
{
int i, fail;
fail = 0;
for (i=0; i<ny; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(int nx, int ny, DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < _PB_NX)
{
tmp[i] = 0;
int j;
for(j=0; j < _PB_NY; j++)
{
tmp[i] += A[i*NY+j] * x[j];
}
}
}
__global__ void atax_kernel2(int nx, int ny, DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < _PB_NY)
{
y[j] = 0;
int i;
for(i=0; i < _PB_NX; i++)
{
y[j] += A[i*NY+j] * tmp[i];
}
}
}
void atax_cpu(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i,j;
for (i= 0; i < _PB_NY; i++)
{
y[i] = 0;
}
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
{
tmp[i] = tmp[i] + A[i][j] * x[j];
}
for (j = 0; j < _PB_NY; j++)
{
y[j] = y[j] + A[i][j] * tmp[i];
}
}
}
void ataxGpu(int nx, int ny, DATA_TYPE POLYBENCH_2D(A, NX, NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NX,nx), DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx), DATA_TYPE POLYBENCH_1D(y_outputFromGpu,NY,ny))
{
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
/* Start timer. */
polybench_start_instruments;
atax_kernel1<<< grid1, block >>>(nx, ny, A_gpu,x_gpu,tmp_gpu);
cudaThreadSynchronize();
atax_kernel2<<< grid2, block >>>(nx, ny, A_gpu,y_gpu,tmp_gpu);
cudaThreadSynchronize();
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(tmp_gpu);
}
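/* A minimal sketch (illustrative; not called by this benchmark) of checking the
 * two kernel launches above: cudaGetLastError catches launch-configuration
 * errors, cudaDeviceSynchronize surfaces asynchronous execution errors. */
static void checkKernelLaunchSketch(const char *label)
{
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess)
	{
		err = cudaDeviceSynchronize();
	}
	if (err != cudaSuccess)
	{
		fprintf(stderr, "%s: %s\n", label, cudaGetErrorString(err));
	}
}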
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main(int argc, char** argv)
{
int nx = NX;
int ny = NY;
POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,NX,NY,nx,ny);
POLYBENCH_1D_ARRAY_DECL(x,DATA_TYPE,NY,ny);
POLYBENCH_1D_ARRAY_DECL(y,DATA_TYPE,NY,ny);
POLYBENCH_1D_ARRAY_DECL(y_outputFromGpu,DATA_TYPE,NY,ny);
POLYBENCH_1D_ARRAY_DECL(tmp,DATA_TYPE,NX,nx);
init_array(nx, ny, POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(A));
GPU_argv_init();
ataxGpu(nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp),
POLYBENCH_ARRAY(y_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
atax_cpu(nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(ny, POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(y_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(ny, POLYBENCH_ARRAY(y_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(y_outputFromGpu);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
#include <polybench.c> |
428dc3085e8ca723f5cdce799ed7111013f0c443.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
*/
/* Matrix multiplication: C = A * B.
* Host code.
* performant generic kernel for matrix multiplication.
*
*/
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include "dgemm_cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include <math.h>
#include <fstream>
#include <vector>
#include <iostream>
#include <algorithm>
typedef struct {
double *local_A;
double *local_B;
double *local_C;
int m;
int n_threads;
int tid;
int rank;
} ThreadsInfo;
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1);
}
checkCudaErrors( hipSetDevice(devID) );
//printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
// Initialization code to find the best CUDA Device
// end of CUDA Helper Functions
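// The comments above mention a best-GPU (maximum GFLOPS) helper that is not
// present in this file. A minimal sketch of the usual idea, ranking devices by
// SM count x clock rate; the function name is illustrative and nothing here
// calls it (doCUDA simply queries the current device).
int gpuGetMaxGflopsDeviceIdSketch()
{
	int deviceCount = 0, bestDevice = 0;
	unsigned long long bestPerf = 0;
	checkCudaErrors(hipGetDeviceCount(&deviceCount));
	for (int dev = 0; dev < deviceCount; ++dev)
	{
		hipDeviceProp_t prop;
		checkCudaErrors(hipGetDeviceProperties(&prop, dev));
		// rough throughput proxy: multiprocessor count times clock rate
		unsigned long long perf = (unsigned long long)prop.multiProcessorCount * (unsigned long long)prop.clockRate;
		if (perf > bestPerf)
		{
			bestPerf = perf;
			bestDevice = dev;
		}
	}
	return bestDevice;
}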
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void randomInit(float*, int);
void inline checkError(hipblasStatus_t status, const char* msg)
{
if(status != HIPBLAS_STATUS_SUCCESS){
printf("%s", msg);
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
extern "C" void doCUDA(void *ptr)
{
ThreadsInfo *threads_info = (ThreadsInfo *)ptr;
int size = threads_info->m;
int devID;
hipDeviceProp_t props;
// get number of SMs on this GPU
checkCudaErrors(hipGetDevice(&devID));
checkCudaErrors(hipGetDeviceProperties(&props, devID));
// use a larger block size for Fermi and above
int block_size = (props.major < 2) ? 16 : 32;
srand(2006);
// Optional Command-line multiplier for matrix sizes
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = size;
uiHA = size;
uiWB = size;
uiHB = size;
uiWC = size;
uiHC = size;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
//setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(uiWC / threads.x, uiHC / threads.y);
// CUBLAS version 2.0
hipblasHandle_t handle;
checkError(hipblasCreate(&handle), "hipblasCreate() error!\n");
const float alpha = 1.0f;
const float beta = 0.0f;
hipblasStatus_t ret = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, uiWB, uiHA, uiWA, &alpha, d_B, uiWB, d_A, uiWA, &beta, d_C, uiWA);
checkError(ret, "cublas Sgemm returned an error!\n");
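// Why the operands are passed as (d_B, d_A): BLAS assumes column-major storage,
// and a row-major matrix reinterpreted as column-major is its transpose. With
// m = uiWB, n = uiHA, k = uiWA the call computes, in column-major terms,
//   C_col = B_col * A_col = B^T * A^T = (A * B)^T,
// which read back with row-major indexing is exactly C = A * B.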
getLastCudaError("CUBLAS Kernel execution failed");
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(h_CUBLAS, d_C, mem_size_C, hipMemcpyDeviceToHost) );
checkError(hipblasDestroy(handle), "hipblasDestroy() error!\n");
hipDeviceSynchronize();
getLastCudaError("CUDA matrixMul Kernel execution failed");
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
hipDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
| 428dc3085e8ca723f5cdce799ed7111013f0c443.cu | /*
*
*/
/* Matrix multiplication: C = A * B.
* Host code.
* performant generic kernel for matrix multiplication.
*
*/
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include "dgemm_cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include <math.h>
#include <fstream>
#include <vector>
#include <iostream>
#include <algorithm>
typedef struct {
double *local_A;
double *local_B;
double *local_C;
int m;
int n_threads;
int tid;
int rank;
} ThreadsInfo;
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
        exit(-1);
}
checkCudaErrors( cudaSetDevice(devID) );
//printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
// Initialization code to find the best CUDA Device
// end of CUDA Helper Functions
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void randomInit(float*, int);
void inline checkError(cublasStatus_t status, const char* msg)
{
if(status != CUBLAS_STATUS_SUCCESS){
        printf("%s", msg);
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
extern "C" void doCUDA(void *ptr)
{
ThreadsInfo *threads_info = (ThreadsInfo *)ptr;
int size = threads_info->m;
int devID;
cudaDeviceProp props;
// get number of SMs on this GPU
checkCudaErrors(cudaGetDevice(&devID));
checkCudaErrors(cudaGetDeviceProperties(&props, devID));
// use a larger block size for Fermi and above
int block_size = (props.major < 2) ? 16 : 32;
srand(2006);
    // Matrix sizes are taken from the caller (threads_info->m); all matrices are square.
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = size;
uiHA = size;
uiWB = size;
uiHB = size;
uiWC = size;
uiHC = size;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
//setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(uiWC / threads.x, uiHC / threads.y);
// CUBLAS version 2.0
cublasHandle_t handle;
checkError(cublasCreate(&handle), "cublasCreate() error!\n");
const float alpha = 1.0f;
const float beta = 0.0f;
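    // cuBLAS assumes column-major storage, so the operands are passed as (B, A):
    // the column-major product B*A it computes is exactly the row-major A*B we want.
    // All dimensions are equal here, so the leading dimensions (including ldc = uiWA) coincide.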
cublasStatus_t ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, uiWB, uiHA, uiWA, &alpha, d_B, uiWB, d_A, uiWA, &beta, d_C, uiWA);
checkError(ret, "cublas Sgemm returned an error!\n");
getLastCudaError("CUBLAS Kernel execution failed");
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(h_CUBLAS, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
checkError(cublasDestroy(handle), "cublasDestroy() error!\n");
cudaDeviceSynchronize();
getLastCudaError("CUDA matrixMul Kernel execution failed");
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
free(h_A);
free(h_B);
    free(h_C);
    free(h_CUBLAS);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
cudaDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
|
680d11af75c4829864425d7d241ffd0010493c97.hip | // !!! This is a file automatically generated by hipify!!!
#include "MatrixDevice.h"
#include "debug.h"
namespace gpu {
MatrixDevice::MatrixDevice() {
rows_ = cols_ = offset_ = 0;
buffer_ = NULL;
}
MatrixDevice::MatrixDevice(int rows, int cols) {
rows_ = rows;
cols_ = cols;
offset_ = 1;
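  // allocate rows_ * cols_ * offset_ doubles on the device and zero them (offset_ is 1 for this constructor)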
checkCudaErrors(hipMalloc(&buffer_, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(hipMemset(buffer_, 0, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(hipDeviceSynchronize());
}
}
| 680d11af75c4829864425d7d241ffd0010493c97.cu | #include "MatrixDevice.h"
#include "debug.h"
namespace gpu {
MatrixDevice::MatrixDevice() {
rows_ = cols_ = offset_ = 0;
buffer_ = NULL;
}
MatrixDevice::MatrixDevice(int rows, int cols) {
rows_ = rows;
cols_ = cols;
offset_ = 1;
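  // allocate rows_ * cols_ * offset_ doubles on the device and zero them (offset_ is 1 for this constructor)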
checkCudaErrors(cudaMalloc(&buffer_, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(cudaMemset(buffer_, 0, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(cudaDeviceSynchronize());
}
}
|
fa806141d457f0ef54a0e7b6e296f6b1ff34e113.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <iostream>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
// namespace {
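// Builds a boolean nearest-neighbour mask: output_mask[row][col] is set to 1 when the point id
// p_idx[col] appears anywhere in nearest_neighbors[row]. Each thread tests one
// (row, col, neighbor) triple and the matches are merged with atomicOr.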
__global__ void SCAN_NN_Mask_Fill_cuda_kernel(
torch::PackedTensorAccessor<int64_t,1,torch::RestrictPtrTraits,size_t> p_idx,
torch::PackedTensorAccessor<int64_t,2,torch::RestrictPtrTraits,size_t> nearest_neighbors,
torch::PackedTensorAccessor<int32_t,2,torch::RestrictPtrTraits,size_t> output_mask) {
// //batch index
// const int row_idx = blockIdx.y;
// // column index
// const int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t row_idx = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t col_idx = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t neighbor_idx = blockIdx.z;
if(row_idx >= output_mask.size(0) || col_idx >= output_mask.size(1) || col_idx >= p_idx.size(0) || neighbor_idx >= nearest_neighbors.size(1) || row_idx >= nearest_neighbors.size(0)) {return;}
// int32_t isneighbor = (p_idx[col_idx] == nearest_neighbors[row_idx][neighbor_idx]) and !(col_idx == row_idx);
int32_t isneighbor = (p_idx[col_idx] == nearest_neighbors[row_idx][neighbor_idx]);
// and !(col_idx == row_idx);
atomicOr(&output_mask[row_idx][col_idx], isneighbor);
}
// } // namespace
void SCAN_NN_Mask_Fill_cuda(
torch::Tensor p_idx,
torch::Tensor nearest_neighbors,
torch::Tensor output_mask)
{
// const int threads = 1024;
const auto b_size = p_idx.size(0);
const auto n_nearest = nearest_neighbors.size(1);
const dim3 threadsPerBlock(32, 32, 1);
const dim3 blocks((b_size + threadsPerBlock.x - 1) / threadsPerBlock.x, (b_size + threadsPerBlock.y - 1) / threadsPerBlock.y, n_nearest);
// assert(p_idx.size(0) == n_nearest.size(0), "p_idx and nearest size at dim 0 must be equal.");
// assert(p_idx.size(0) == output_mask.size(0), "output mask should match dimensions of p_idx");
// assert(p_idx.size(0) == output_mask.size(1), "output mask should match dimensions of p_idx");
// std::cout << "test " << p_idx.size(0) << std::endl;
// std::cout << "test " << nearest_neighbors.size(0) << std::endl;
assert(p_idx.size(0) == nearest_neighbors.size(0));
assert(p_idx.size(0) == output_mask.size(0));
assert(p_idx.size(0) == output_mask.size(1));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(p_idx.device().index());
hipLaunchKernelGGL(( SCAN_NN_Mask_Fill_cuda_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream,
p_idx.packed_accessor<int64_t,1,torch::RestrictPtrTraits,size_t>(),
nearest_neighbors.packed_accessor<int64_t,2,torch::RestrictPtrTraits,size_t>(),
output_mask.packed_accessor<int32_t,2,torch::RestrictPtrTraits,size_t>());
}
| fa806141d457f0ef54a0e7b6e296f6b1ff34e113.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <iostream>
#include <c10/cuda/CUDAStream.h>
// namespace {
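// Builds a boolean nearest-neighbour mask: output_mask[row][col] is set to 1 when the point id
// p_idx[col] appears anywhere in nearest_neighbors[row]. Each thread tests one
// (row, col, neighbor) triple and the matches are merged with atomicOr.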
__global__ void SCAN_NN_Mask_Fill_cuda_kernel(
torch::PackedTensorAccessor<int64_t,1,torch::RestrictPtrTraits,size_t> p_idx,
torch::PackedTensorAccessor<int64_t,2,torch::RestrictPtrTraits,size_t> nearest_neighbors,
torch::PackedTensorAccessor<int32_t,2,torch::RestrictPtrTraits,size_t> output_mask) {
// //batch index
// const int row_idx = blockIdx.y;
// // column index
// const int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t row_idx = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t col_idx = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t neighbor_idx = blockIdx.z;
if(row_idx >= output_mask.size(0) || col_idx >= output_mask.size(1) || col_idx >= p_idx.size(0) || neighbor_idx >= nearest_neighbors.size(1) || row_idx >= nearest_neighbors.size(0)) {return;}
// int32_t isneighbor = (p_idx[col_idx] == nearest_neighbors[row_idx][neighbor_idx]) and !(col_idx == row_idx);
int32_t isneighbor = (p_idx[col_idx] == nearest_neighbors[row_idx][neighbor_idx]);
// and !(col_idx == row_idx);
atomicOr(&output_mask[row_idx][col_idx], isneighbor);
}
// } // namespace
void SCAN_NN_Mask_Fill_cuda(
torch::Tensor p_idx,
torch::Tensor nearest_neighbors,
torch::Tensor output_mask)
{
// const int threads = 1024;
const auto b_size = p_idx.size(0);
const auto n_nearest = nearest_neighbors.size(1);
const dim3 threadsPerBlock(32, 32, 1);
const dim3 blocks((b_size + threadsPerBlock.x - 1) / threadsPerBlock.x, (b_size + threadsPerBlock.y - 1) / threadsPerBlock.y, n_nearest);
// assert(p_idx.size(0) == n_nearest.size(0), "p_idx and nearest size at dim 0 must be equal.");
// assert(p_idx.size(0) == output_mask.size(0), "output mask should match dimensions of p_idx");
// assert(p_idx.size(0) == output_mask.size(1), "output mask should match dimensions of p_idx");
// std::cout << "test " << p_idx.size(0) << std::endl;
// std::cout << "test " << nearest_neighbors.size(0) << std::endl;
assert(p_idx.size(0) == nearest_neighbors.size(0));
assert(p_idx.size(0) == output_mask.size(0));
assert(p_idx.size(0) == output_mask.size(1));
auto stream = at::cuda::getCurrentCUDAStream(p_idx.device().index());
SCAN_NN_Mask_Fill_cuda_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
p_idx.packed_accessor<int64_t,1,torch::RestrictPtrTraits,size_t>(),
nearest_neighbors.packed_accessor<int64_t,2,torch::RestrictPtrTraits,size_t>(),
output_mask.packed_accessor<int32_t,2,torch::RestrictPtrTraits,size_t>());
}
|
7f2ced0a293601367d4b166cad176a281880702e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
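// vec_computeCRLB: per-pixel Fisher-information entries used for the CRLB. The input buffer holds
// the model sub-image followed by pairs of sub-images evaluated at each parameter -/+ h, so d1 and d2
// are central-difference derivatives for parameters `line` and `column`; each output element is
// d1*d2 / model, with a large sentinel where the model is not positive.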
__global__ void vec_computeCRLB (int n,int sizeMatrix,double *output, double *input,double h)
{
int sizeSubImage=n/(sizeMatrix*sizeMatrix);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
int p=(id/sizeSubImage);//p=0..24
int positpix=id%sizeSubImage;//0..sizeSubImage-1
int line=p/(sizeMatrix);//0..4
int column=p%(sizeMatrix);//0..4
double d1=(input[positpix+sizeSubImage*(line*2+2)]-input[positpix+sizeSubImage*(line*2+1)])/(2*h);
double d2=(input[positpix+sizeSubImage*(column*2+2)]-input[positpix+sizeSubImage*(column*2+1)])/(2*h);
if (input[positpix]>0){
output[id]=(d1*d2)/(input[positpix]);
}
else{
output[id]=100000000;
}
}
} | 7f2ced0a293601367d4b166cad176a281880702e.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
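// vec_computeCRLB: per-pixel Fisher-information entries used for the CRLB. The input buffer holds
// the model sub-image followed by pairs of sub-images evaluated at each parameter -/+ h, so d1 and d2
// are central-difference derivatives for parameters `line` and `column`; each output element is
// d1*d2 / model, with a large sentinel where the model is not positive.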
__global__ void vec_computeCRLB (int n,int sizeMatrix,double *output, double *input,double h)
{
int sizeSubImage=n/(sizeMatrix*sizeMatrix);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
int p=(id/sizeSubImage);//p=0..24
int positpix=id%sizeSubImage;//0..sizeSubImage-1
int line=p/(sizeMatrix);//0..4
int column=p%(sizeMatrix);//0..4
double d1=(input[positpix+sizeSubImage*(line*2+2)]-input[positpix+sizeSubImage*(line*2+1)])/(2*h);
double d2=(input[positpix+sizeSubImage*(column*2+2)]-input[positpix+sizeSubImage*(column*2+1)])/(2*h);
if (input[positpix]>0){
output[id]=(d1*d2)/(input[positpix]);
}
else{
output[id]=100000000;
}
}
} |
2d3cb55d0b3e6b5b1e8e56cc35b010c727aa6890.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <ctime>
__global__ void matmulDevice(int *A, int *B, int *C, int N)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
int sum = 0;
for (int i = 0; i < N; i++)
sum += A[row * N + i] * B[i * N + col];
C[row * N + col] = sum;
}
}
void matmulHost(int *A, int *B, int *C, int N)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
int sum = 0;
for (int k = 0; k < N; k++)
sum += A[i * N + k] * B[k * N + j];
C[i * N + j] = sum;
}
}
}
//Sourced online to check CudaErrors while calling methods
#define checkCudaErrors(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
using namespace std;
int main(void)
{
int device_count = 0;
hipGetDeviceCount(&device_count);
    if (device_count == 0)
        cout << "Sorry! You don't have a CUDA device" << endl;
    else
        cout << "CUDA device found! Device count: " << device_count << endl;
int N = 2048; //Depending on your device
int block_size = 32;
int nIter = 1;
unsigned int count = N * N;
unsigned int mem_size = sizeof(int) * count;
//Allocate memory of the indicated size
int *A = (int *)malloc(mem_size);
int *B = (int *)malloc(mem_size);
int *h_C = (int *)malloc(mem_size);
int *hCuda_C = (int *)malloc(mem_size);
int *d_A, *d_B, *d_C;
    //Populate the host arrays (to be copied to device arrays)
for (int i = 0; i < count; i++)
{
A[i] = rand() % 100 + 1;
B[i] = rand() % 100 + 1;
}
unsigned int start_time = clock();
for (int j = 0; j < nIter; j++)
{
matmulHost(A, B, h_C, N);
}
    unsigned int elapsedTime = clock() - start_time;
    // clock() returns ticks; convert to milliseconds before averaging
    float msecPerMatrixMulCpu = (elapsedTime * 1000.0f / CLOCKS_PER_SEC) / nIter;
cout << "CPU time: " << msecPerMatrixMulCpu << endl;
checkCudaErrors(hipMalloc((void **)&d_A, mem_size));
checkCudaErrors(hipMalloc((void **)&d_B, mem_size));
checkCudaErrors(hipMalloc((void **)&d_C, mem_size));
checkCudaErrors(hipMemcpy(d_A, A, mem_size,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, B, mem_size,
hipMemcpyHostToDevice));
dim3 threadsPerBlock(block_size, block_size);
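    // N (2048) is an exact multiple of both possible block sizes (16 and 32), so the integer
    // division below covers the whole matrix; a non-multiple N would need ceil-division
    // (the kernel already bounds-checks row/col).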
dim3 blocksPerGrid(N / block_size, N / block_size);
hipEvent_t start;
hipEvent_t stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, 0));
for (int j = 0; j < nIter; j++)
{
hipLaunchKernelGGL(( matmulDevice), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
}
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
float msecPerMatrixMul = msecTotal / nIter;
cout << "GPU time: " << msecPerMatrixMul << endl;
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(hCuda_C, d_C, mem_size, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
// free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
bool test = true;
for (int i = 0; i < count; i++)
{
if (h_C[i] != hCuda_C[i])
test = false;
}
if (test)
cout << "CUDA Array Multiplication Successful" << endl;
else
cout << "CUDA Array Multiplication Failed" << endl;
return 0;
}
| 2d3cb55d0b3e6b5b1e8e56cc35b010c727aa6890.cu |
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
__global__ void matmulDevice(int *A, int *B, int *C, int N)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
int sum = 0;
for (int i = 0; i < N; i++)
sum += A[row * N + i] * B[i * N + col];
C[row * N + col] = sum;
}
}
void matmulHost(int *A, int *B, int *C, int N)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
int sum = 0;
for (int k = 0; k < N; k++)
sum += A[i * N + k] * B[k * N + j];
C[i * N + j] = sum;
}
}
}
//Sourced online to check CudaErrors while calling methods
#define checkCudaErrors(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
using namespace std;
int main(void)
{
int device_count = 0;
cudaGetDeviceCount(&device_count);
    if (device_count == 0)
        cout << "Sorry! You don't have a CUDA device" << endl;
    else
        cout << "CUDA device found! Device count: " << device_count << endl;
int N = 2048; //Depending on your device
int block_size = 32;
int nIter = 1;
unsigned int count = N * N;
unsigned int mem_size = sizeof(int) * count;
//Allocate memory of the indicated size
int *A = (int *)malloc(mem_size);
int *B = (int *)malloc(mem_size);
int *h_C = (int *)malloc(mem_size);
int *hCuda_C = (int *)malloc(mem_size);
int *d_A, *d_B, *d_C;
    //Populate the host arrays (to be copied to device arrays)
for (int i = 0; i < count; i++)
{
A[i] = rand() % 100 + 1;
B[i] = rand() % 100 + 1;
}
unsigned int start_time = clock();
for (int j = 0; j < nIter; j++)
{
matmulHost(A, B, h_C, N);
}
    unsigned int elapsedTime = clock() - start_time;
    // clock() returns ticks; convert to milliseconds before averaging
    float msecPerMatrixMulCpu = (elapsedTime * 1000.0f / CLOCKS_PER_SEC) / nIter;
cout << "CPU time: " << msecPerMatrixMulCpu << endl;
checkCudaErrors(cudaMalloc((void **)&d_A, mem_size));
checkCudaErrors(cudaMalloc((void **)&d_B, mem_size));
checkCudaErrors(cudaMalloc((void **)&d_C, mem_size));
checkCudaErrors(cudaMemcpy(d_A, A, mem_size,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, B, mem_size,
cudaMemcpyHostToDevice));
dim3 threadsPerBlock(block_size, block_size);
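    // N (2048) is an exact multiple of both possible block sizes (16 and 32), so the integer
    // division below covers the whole matrix; a non-multiple N would need ceil-division
    // (the kernel already bounds-checks row/col).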
dim3 blocksPerGrid(N / block_size, N / block_size);
cudaEvent_t start;
cudaEvent_t stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, 0));
for (int j = 0; j < nIter; j++)
{
matmulDevice<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
}
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
float msecPerMatrixMul = msecTotal / nIter;
cout << "GPU time: " << msecPerMatrixMul << endl;
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(hCuda_C, d_C, mem_size, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
// free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
bool test = true;
for (int i = 0; i < count; i++)
{
if (h_C[i] != hCuda_C[i])
test = false;
}
if (test)
cout << "CUDA Array Multiplication Successful" << endl;
else
cout << "CUDA Array Multiplication Failed" << endl;
return 0;
}
|
8a1733f6fcdaa13ae07e72e5bfeb3a857f90f651.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
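// vec_addPhotonsAndBackgroundMany_scmos: for every pixel, scale the normalized model of its
// sub-image (index id2) by that sub-image's photon count, add its background level, and add the
// per-pixel sCMOS term; photonAndBackground stores (photons, background) pairs, one per sub-image.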
__global__ void vec_addPhotonsAndBackgroundMany_scmos (int n, int sizeSubImage,double *output, double *input, double *photonAndBackground, double * scmos)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
int id2=id/sizeSubImage;
if (id < n)
{
output[id]=input[id]*photonAndBackground[id2*2]+photonAndBackground[id2*2+1]+scmos[id];
}
} | 8a1733f6fcdaa13ae07e72e5bfeb3a857f90f651.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
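// vec_addPhotonsAndBackgroundMany_scmos: for every pixel, scale the normalized model of its
// sub-image (index id2) by that sub-image's photon count, add its background level, and add the
// per-pixel sCMOS term; photonAndBackground stores (photons, background) pairs, one per sub-image.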
__global__ void vec_addPhotonsAndBackgroundMany_scmos (int n, int sizeSubImage,double *output, double *input, double *photonAndBackground, double * scmos)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
int id2=id/sizeSubImage;
if (id < n)
{
output[id]=input[id]*photonAndBackground[id2*2]+photonAndBackground[id2*2+1]+scmos[id];
}
} |
20a4be01c32c09718dae22fa7eb01ebe4360470a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "spread.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
Real *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
unsigned int spitch = 1;
Real *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
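// Note: each buffer is allocated as XSIZE*YSIZE raw bytes, not XSIZE*YSIZE elements of Real,
// which may be smaller than the kernel indexes; this timing harness never reads the results back,
// so the contents are not validated.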
unsigned int dpitch = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(spread, dim3(gridBlock), dim3(threadBlock), 0, 0, src, spitch, dst, dpitch);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(spread, dim3(gridBlock), dim3(threadBlock), 0, 0, src, spitch, dst, dpitch);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(spread, dim3(gridBlock), dim3(threadBlock), 0, 0, src, spitch, dst, dpitch);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 20a4be01c32c09718dae22fa7eb01ebe4360470a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "spread.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
Real *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
unsigned int spitch = 1;
Real *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
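// Note: each buffer is allocated as XSIZE*YSIZE raw bytes, not XSIZE*YSIZE elements of Real,
// which may be smaller than the kernel indexes; this timing harness never reads the results back,
// so the contents are not validated.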
unsigned int dpitch = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
spread<<<gridBlock,threadBlock>>>(src,spitch,dst,dpitch);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
spread<<<gridBlock,threadBlock>>>(src,spitch,dst,dpitch);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
spread<<<gridBlock,threadBlock>>>(src,spitch,dst,dpitch);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
22a84cf52abdf15f241f751e7977812d2b8c0d7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The MIT License
*
* Copyright (c) 1997-2020 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Parallel/Parallel.h>
#include <Core/Util/GPU.h>
#include <sci_defs/cuda_defs.h>
namespace Uintah {
//______________________________________________________________________
//
// @brief A GPU kernel for the Jacobi iterations in the Poisson 1-material solver
// @param patchID the patch this kernel will operate over
// @param patchNodeLowIndex the low node index of the patch as (x,y,z)
// @param patchNodeHighIndex the high node index of the patch as (x,y,z)
// @param domainLow a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest corner of the work area as (x,y,z)
// @param old_gpudw the old GPU DataWarehouse
// @param new_gpudw the new GPU DataWarehouse
// @param stream the stream this kernel was launched on
__global__
void
unifiedSchedulerTestKernel( int patchID
, uint3 patchNodeLowIndex
, uint3 patchNodeHighIndex
, uint3 domainLow
, uint3 domainHigh
, GPUDataWarehouse * old_gpudw
, GPUDataWarehouse * new_gpudw
, hipStream_t * stream
)
{
const GPUGridVariable<double> phi;
GPUGridVariable<double> newphi;
old_gpudw->get(phi, "phi", patchID, 0, 0);
new_gpudw->getModifiable(newphi, "phi", patchID, 0);
// calculate the thread indices
int i = blockDim.x * blockIdx.x + threadIdx.x + patchNodeLowIndex.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + patchNodeLowIndex.y;
// If the threads are within the bounds of the patch the algorithm is allowed to stream along the z direction
// applying the stencil to a line of cells. The z direction is streamed because it allows access of x and y
// elements that are close to one another which should allow coalesced memory accesses.
// Copy all face cells (any on an exposed face).
// These outer cells don't get computed, just preserved across iterations.
// newphi(i,j,k) = phi(i,j,k)
if (i >= patchNodeLowIndex.x && j >= patchNodeLowIndex.y && i < patchNodeHighIndex.x && j < patchNodeHighIndex.y ) {
if ((domainLow.x - patchNodeLowIndex.x == 1 && i == patchNodeLowIndex.x) || /*left face*/
(domainLow.y - patchNodeLowIndex.y == 1 && j == patchNodeLowIndex.y) || /*bottom face*/
(patchNodeHighIndex.x - domainHigh.x == 1 && i == patchNodeHighIndex.x - 1) || /*right face*/
(patchNodeHighIndex.y - domainHigh.y == 1 && j == patchNodeHighIndex.y - 1)) { /*top face*/
for (int k = domainLow.z; k < domainHigh.z; k++) {
newphi(i,j,k) = phi(i,j,k);
}
}
if (domainLow.z - patchNodeLowIndex.z == 1) {
newphi(i,j,patchNodeLowIndex.z) = phi(i,j,patchNodeLowIndex.z);
}
if (patchNodeHighIndex.z - domainHigh.z == 1) {
newphi(i,j,patchNodeHighIndex.z-1) = phi(i,j,patchNodeHighIndex.z-1);
}
}
__syncthreads();
if (i >= domainLow.x && j >= domainLow.y && i < domainHigh.x && j < domainHigh.y ) {
for (int k = domainLow.z; k < domainHigh.z; k++) {
newphi(i,j,k) = (1. / 6)
* (phi(i-1, j, k)
+ phi(i+1, j, k)
+ phi(i, j-1, k)
+ phi(i, j+1, k)
+ phi(i, j, k-1)
+ phi(i, j, k+1));
}
}
}
void
launchUnifiedSchedulerTestKernel( dim3 dimGrid
, dim3 dimBlock
, hipStream_t * stream
, int patchID
, uint3 patchNodeLowIndex
, uint3 patchNodeHighIndex
, uint3 domainLow
, uint3 domainHigh
, GPUDataWarehouse * old_gpudw
, GPUDataWarehouse * new_gpudw
)
{
hipLaunchKernelGGL(( unifiedSchedulerTestKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream, patchID
, patchNodeLowIndex
, patchNodeHighIndex
, domainLow
, domainHigh
, old_gpudw
, new_gpudw
, stream
);
//hipDeviceSynchronize();
}
} //end namespace Uintah
| 22a84cf52abdf15f241f751e7977812d2b8c0d7a.cu | /*
* The MIT License
*
* Copyright (c) 1997-2020 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Parallel/Parallel.h>
#include <Core/Util/GPU.h>
#include <sci_defs/cuda_defs.h>
namespace Uintah {
//______________________________________________________________________
//
// @brief A GPU kernel for the Jacobi iterations in the Poisson 1-material solver
// @param patchID the patch this kernel will operate over
// @param patchNodeLowIndex the low node index of the patch as (x,y,z)
// @param patchNodeHighIndex the high node index of the patch as (x,y,z)
// @param domainLow a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest corner of the work area as (x,y,z)
// @param old_gpudw the old GPU DataWarehouse
// @param new_gpudw the new GPU DataWarehouse
// @param stream the stream this kernel was launched on
__global__
void
unifiedSchedulerTestKernel( int patchID
, uint3 patchNodeLowIndex
, uint3 patchNodeHighIndex
, uint3 domainLow
, uint3 domainHigh
, GPUDataWarehouse * old_gpudw
, GPUDataWarehouse * new_gpudw
, cudaStream_t * stream
)
{
const GPUGridVariable<double> phi;
GPUGridVariable<double> newphi;
old_gpudw->get(phi, "phi", patchID, 0, 0);
new_gpudw->getModifiable(newphi, "phi", patchID, 0);
// calculate the thread indices
int i = blockDim.x * blockIdx.x + threadIdx.x + patchNodeLowIndex.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + patchNodeLowIndex.y;
// If the threads are within the bounds of the patch the algorithm is allowed to stream along the z direction
// applying the stencil to a line of cells. The z direction is streamed because it allows access of x and y
// elements that are close to one another which should allow coalesced memory accesses.
// Copy all face cells (any on an exposed face).
// These outer cells don't get computed, just preserved across iterations.
// newphi(i,j,k) = phi(i,j,k)
if (i >= patchNodeLowIndex.x && j >= patchNodeLowIndex.y && i < patchNodeHighIndex.x && j < patchNodeHighIndex.y ) {
if ((domainLow.x - patchNodeLowIndex.x == 1 && i == patchNodeLowIndex.x) || /*left face*/
(domainLow.y - patchNodeLowIndex.y == 1 && j == patchNodeLowIndex.y) || /*bottom face*/
(patchNodeHighIndex.x - domainHigh.x == 1 && i == patchNodeHighIndex.x - 1) || /*right face*/
(patchNodeHighIndex.y - domainHigh.y == 1 && j == patchNodeHighIndex.y - 1)) { /*top face*/
for (int k = domainLow.z; k < domainHigh.z; k++) {
newphi(i,j,k) = phi(i,j,k);
}
}
if (domainLow.z - patchNodeLowIndex.z == 1) {
newphi(i,j,patchNodeLowIndex.z) = phi(i,j,patchNodeLowIndex.z);
}
if (patchNodeHighIndex.z - domainHigh.z == 1) {
newphi(i,j,patchNodeHighIndex.z-1) = phi(i,j,patchNodeHighIndex.z-1);
}
}
__syncthreads();
if (i >= domainLow.x && j >= domainLow.y && i < domainHigh.x && j < domainHigh.y ) {
for (int k = domainLow.z; k < domainHigh.z; k++) {
newphi(i,j,k) = (1. / 6)
* (phi(i-1, j, k)
+ phi(i+1, j, k)
+ phi(i, j-1, k)
+ phi(i, j+1, k)
+ phi(i, j, k-1)
+ phi(i, j, k+1));
}
}
}
void
launchUnifiedSchedulerTestKernel( dim3 dimGrid
, dim3 dimBlock
, cudaStream_t * stream
, int patchID
, uint3 patchNodeLowIndex
, uint3 patchNodeHighIndex
, uint3 domainLow
, uint3 domainHigh
, GPUDataWarehouse * old_gpudw
, GPUDataWarehouse * new_gpudw
)
{
unifiedSchedulerTestKernel<<< dimGrid, dimBlock, 0, *stream>>>( patchID
, patchNodeLowIndex
, patchNodeHighIndex
, domainLow
, domainHigh
, old_gpudw
, new_gpudw
, stream
);
//cudaDeviceSynchronize();
}
} //end namespace Uintah
|
84b4c220297a47dbf446f20e6f02f0cef7823300.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kMartixSubstractMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nThreads = 1;
const float *m1 = NULL;
hipMalloc(&m1, XSIZE*YSIZE);
const float *m2 = NULL;
hipMalloc(&m2, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kMartixSubstractMatrix, dim3(gridBlock), dim3(threadBlock), 0, 0, nThreads, m1, m2, output);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kMartixSubstractMatrix, dim3(gridBlock), dim3(threadBlock), 0, 0, nThreads, m1, m2, output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kMartixSubstractMatrix, dim3(gridBlock), dim3(threadBlock), 0, 0, nThreads, m1, m2, output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 84b4c220297a47dbf446f20e6f02f0cef7823300.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kMartixSubstractMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nThreads = 1;
const float *m1 = NULL;
cudaMalloc(&m1, XSIZE*YSIZE);
const float *m2 = NULL;
cudaMalloc(&m2, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kMartixSubstractMatrix<<<gridBlock,threadBlock>>>(nThreads,m1,m2,output);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kMartixSubstractMatrix<<<gridBlock,threadBlock>>>(nThreads,m1,m2,output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kMartixSubstractMatrix<<<gridBlock,threadBlock>>>(nThreads,m1,m2,output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
318407b47d9b01c78c0c7541a5737dcfd60f2079.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgesellcmv.cu, normal z -> d, Tue Aug 30 09:38:42 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
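// Layout illustration, derived from the indexing in the kernel below (not part of the
// original source): rows are grouped into slices of `blocksize` rows; drowptr[s] is the
// offset of slice s in dval/dcolind, and inside a slice the entries are stored column
// by column, i.e. entry n of slice-local row r (r = threadIdx.x) lives at
// dval[drowptr[s] + blocksize*n + r], with explicit zero padding for rows shorter than
// the longest row of the slice. E.g. blocksize = 2, slice rows {a,b} and {c}:
// dval stores [a, c, b, 0].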
__global__ void
dgesellcmv_kernel(
int num_rows,
int num_cols,
int blocksize,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
double val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row (=1)
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in SELLC/P
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLC/P
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgesellcmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
magma_int_t threads = blocksize;
hipLaunchKernelGGL(( dgesellcmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, blocksize, alpha,
dval, dcolind, drowptr, dx, beta, dy );
return MAGMA_SUCCESS;
}
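/*
    Usage sketch (variable names and the queue setup are assumptions, not part of this
    file); all arrays must already reside on the device in SELLC/SELLP layout:

        magma_queue_t queue;
        magma_queue_create( 0, &queue );
        magma_dgesellcmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
                          alpha, dval, dcolind, drowptr, dx, beta, dy, queue );
        magma_queue_destroy( queue );
*/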
| 318407b47d9b01c78c0c7541a5737dcfd60f2079.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgesellcmv.cu, normal z -> d, Tue Aug 30 09:38:42 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
dgesellcmv_kernel(
int num_rows,
int num_cols,
int blocksize,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
double val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row (=1)
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in SELLC/P
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLC/P
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgesellcmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
magma_int_t threads = blocksize;
dgesellcmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, blocksize, alpha,
dval, dcolind, drowptr, dx, beta, dy );
return MAGMA_SUCCESS;
}
|
4c35cf42cc5252ba7090f4f69169e21112877f34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <ctime>
#include <string>
#include <iostream>
#include <stdlib.h>
typedef bool TYPE;
bool single_adder(TYPE A, TYPE B, TYPE& Ci, TYPE& Si)
{
Ci = A & B; // AND gate gives the carry
Si = A ^ B; // XOR gate gives the sum
printf("%d + %d = %d, Ci: %d\n",A,B,Si,Ci);
return 1;
}
bool multi_adder(TYPE A, TYPE B, TYPE Ci0, TYPE& Ci, TYPE& Si)
{
Si = A ^ B ^ Ci0; // XOR gate gives the sum
Ci = A & B; // carry-generate signal
Ci = ((A ^ B) & Ci0) | Ci; // carry-propagate signal
// Ci = ((A | B) & Ci0) | Ci; // alternative: carry out when A or B is 1 and the incoming carry is 1
printf("%d + %d + %d = %d, Ci: %d\n",A,B,Ci0,Si,Ci);
return 1;
}
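// Worked example for one stage: A = 1, B = 1, Ci0 = 1
// Si = 1 ^ 1 ^ 1 = 1 and Ci = (1 & 1) | ((1 ^ 1) & 1) = 1, i.e. 1 + 1 + 1 = 0b11.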
__global__ void full_adder(TYPE *num1, TYPE *num2, TYPE *result, TYPE* Ci0_, int startIdx, int length)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index >= length)
{
return;
}
TYPE Ci0 = Ci0_[0];
TYPE Ci = 0;
for(int i = 0; i < index; i++)
{
TYPE A = num1[i + startIdx];
TYPE B = num2[i + startIdx];
Ci = A & B;
Ci = ((A ^ B) & Ci0) | Ci; // carry-propagate signal
Ci0 = Ci;
}
int i = index;
TYPE A = num1[i + startIdx];
TYPE B = num2[i + startIdx];
TYPE Si = A ^ B ^ Ci0; // XOR gate gives the sum
result[index + startIdx] = Si;
if(index == length - 1)
{
Ci = A & B;
Ci = ((A ^ B) & Ci0) | Ci; // carry-propagate signal
Ci0_[0] = Ci;
}
}
int convert_string_to_array(std::string num1, std::string num2, TYPE* &A, TYPE* &B)
{
// If the two operands differ in length, pad the shorter one with leading zeros
int zero_num = num1.length() - num2.length();
std::string* temp;
if(zero_num > 0)
{
temp = &num2;
}
else if(zero_num < 0)
{
temp = &num1;
}
for(int i = 0; i < abs(zero_num); i++)
{
*temp = "0" + *temp;
}
int length = num1.length();
A = new TYPE[length];
B = new TYPE[length];
for(int i = 0; i < length; i++)
{
A[i] = num1[length - 1 - i] - '0';
B[i] = num2[length - 1 - i] - '0';
}
return length;
}
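// Example: num1 = "10101", num2 = "111" -> num2 is padded to "00111" and both operands
// are stored least-significant bit first: A = {1,0,1,0,1}, B = {1,1,1,0,0}, return 5.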
int main()
{
std::string num1 = "10101";
std::string num2 = "11111";
TYPE *ACpu;
TYPE *BCpu;
int length = convert_string_to_array(num1, num2, ACpu, BCpu);
for(int i = 0; i < length; i++)
{
printf("%d ", ACpu[i]);
}
printf("\n");
for(int i = 0; i < length; i++)
{
printf("%d ", BCpu[i]);
}
printf("\n");
TYPE *AGpu;
hipMalloc((void**)&AGpu, length * sizeof(TYPE));
hipMemcpy(AGpu, ACpu, length * sizeof(TYPE), hipMemcpyHostToDevice);
TYPE *BGpu;
hipMalloc((void**)&BGpu, length * sizeof(TYPE));
hipMemcpy(BGpu, BCpu, length * sizeof(TYPE), hipMemcpyHostToDevice);
TYPE *resultGpu;
hipMalloc((void**)&resultGpu, length * sizeof(TYPE));
TYPE *CiCpu = new TYPE[1];
CiCpu[0] = 0;
TYPE *CiGpu;
hipMalloc((void**)&CiGpu, 1 * sizeof(TYPE));
hipMemcpy(CiGpu, CiCpu, 1 * sizeof(TYPE), hipMemcpyHostToDevice);
int threadNum = 1;
int blockNum = 4;
int totalNum = threadNum * blockNum;
for(int i = 0; totalNum * i < length; i++)
{
printf("i: %d, i2: %d\n", totalNum * i, min(length - totalNum * i, totalNum));
full_adder<<<blockNum, threadNum>>>(AGpu, BGpu, resultGpu, CiGpu, totalNum * i, min(length - totalNum * i, totalNum));
}
TYPE *result = new TYPE[length];
hipMemcpy(result, resultGpu, length * sizeof(TYPE), hipMemcpyDeviceToHost);
for(int i = 0; i < length; i++)
{
printf("%d ", result[length - 1 - i]);
}
printf("\n");
return 0;
} | 4c35cf42cc5252ba7090f4f69169e21112877f34.cu | #include <stdio.h>
#include <ctime>
#include <string>
#include <iostream>
#include <stdlib.h>
typedef bool TYPE;
bool single_adder(TYPE A, TYPE B, TYPE& Ci, TYPE& Si)
{
Ci = A & B; // AND gate gives the carry
Si = A ^ B; // XOR gate gives the sum
printf("%d + %d = %d, Ci: %d\n",A,B,Si,Ci);
return 1;
}
bool multi_adder(TYPE A, TYPE B, TYPE Ci0, TYPE& Ci, TYPE& Si)
{
Si = A ^ B ^ Ci0; // XOR gate gives the sum
Ci = A & B; // carry-generate signal
Ci = ((A ^ B) & Ci0) | Ci; // carry-propagate signal
// Ci = ((A | B) & Ci0) | Ci; // alternative: carry out when A or B is 1 and the incoming carry is 1
printf("%d + %d + %d = %d, Ci: %d\n",A,B,Ci0,Si,Ci);
return 1;
}
__global__ void full_adder(TYPE *num1, TYPE *num2, TYPE *result, TYPE* Ci0_, int startIdx, int length)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index >= length)
{
return;
}
TYPE Ci0 = Ci0_[0];
TYPE Ci = 0;
for(int i = 0; i < index; i++)
{
TYPE A = num1[i + startIdx];
TYPE B = num2[i + startIdx];
Ci = A & B;
Ci = ((A ^ B) & Ci0) | Ci; // carry-propagate signal
Ci0 = Ci;
}
int i = index;
TYPE A = num1[i + startIdx];
TYPE B = num2[i + startIdx];
TYPE Si = A ^ B ^ Ci0; // XOR gate gives the sum
result[index + startIdx] = Si;
if(index == length - 1)
{
Ci = A & B;
Ci = ((A ^ B) & Ci0) | Ci; // carry-propagate signal
Ci0_[0] = Ci;
}
}
int convert_string_to_array(std::string num1, std::string num2, TYPE* &A, TYPE* &B)
{
// If the two operands differ in length, pad the shorter one with leading zeros
int zero_num = num1.length() - num2.length();
std::string* temp;
if(zero_num > 0)
{
temp = &num2;
}
else if(zero_num < 0)
{
temp = &num1;
}
for(int i = 0; i < abs(zero_num); i++)
{
*temp = "0" + *temp;
}
int length = num1.length();
A = new TYPE[length];
B = new TYPE[length];
for(int i = 0; i < length; i++)
{
A[i] = num1[length - 1 - i] - '0';
B[i] = num2[length - 1 - i] - '0';
}
return length;
}
int main()
{
std::string num1 = "10101";
std::string num2 = "11111";
TYPE *ACpu;
TYPE *BCpu;
int length = convert_string_to_array(num1, num2, ACpu, BCpu);
for(int i = 0; i < length; i++)
{
printf("%d ", ACpu[i]);
}
printf("\n");
for(int i = 0; i < length; i++)
{
printf("%d ", BCpu[i]);
}
printf("\n");
TYPE *AGpu;
cudaMalloc((void**)&AGpu, length * sizeof(TYPE));
cudaMemcpy(AGpu, ACpu, length * sizeof(TYPE), cudaMemcpyHostToDevice);
TYPE *BGpu;
cudaMalloc((void**)&BGpu, length * sizeof(TYPE));
cudaMemcpy(BGpu, BCpu, length * sizeof(TYPE), cudaMemcpyHostToDevice);
TYPE *resultGpu;
cudaMalloc((void**)&resultGpu, length * sizeof(TYPE));
TYPE *CiCpu = new TYPE[1];
CiCpu[0] = 0;
TYPE *CiGpu;
cudaMalloc((void**)&CiGpu, 1 * sizeof(TYPE));
cudaMemcpy(CiGpu, CiCpu, 1 * sizeof(TYPE), cudaMemcpyHostToDevice);
int threadNum = 1;
int blockNum = 4;
int totalNum = threadNum * blockNum;
for(int i = 0; totalNum * i < length; i++)
{
printf("i: %d, i2: %d\n", totalNum * i, min(length - totalNum * i, totalNum));
full_adder<<<blockNum, threadNum>>>(AGpu, BGpu, resultGpu, CiGpu, totalNum * i, min(length - totalNum * i, totalNum));
}
TYPE *result = new TYPE[length];
cudaMemcpy(result, resultGpu, length * sizeof(TYPE), cudaMemcpyDeviceToHost);
for(int i = 0; i < length; i++)
{
printf("%d ", result[length - 1 - i]);
}
printf("\n");
return 0;
} |
778d7bf90c5350479c23d51c7e45e311ead0c371.hip | // !!! This is a file automatically generated by hipify!!!
#include "VolumeFilter.h"
#include <hip/hip_runtime.h>
#include "cudaUtil.h"
// HACK to get stuff to compile...
extern texture<float4, hipTextureType3D, hipReadModeElementType> g_texVolume1;
#include "VolumeFilterKernels.cuh"
//void VolumeFilter::GetMaskedVelocity(const float* d_pDataX, const float* d_pDataY, const float* d_pDataZ, float* d_pOutX, float* d_pOutY, float* d_pOutZ, const tum3D::Vec3i& size, eMeasure measure, float threshold)
//{
// dim3 blockSize(32, 4);
// dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.y() + blockSize.y - 1) / blockSize.y, (size.z() + blockSize.z - 1) / blockSize.z);
//
// #define MASKED_VELOCITY(M) maskedVelocityKernel<M><<<blockCount, blockSize>>>(d_pDataX, d_pDataY, d_pDataZ, d_pOutX, d_pOutY, d_pOutZ, size.x(), size.y(), size.z(), threshold)
// switch(measure)
// {
// case MEASURE_VELOCITY : MASKED_VELOCITY(MEASURE_VELOCITY); break;
// case MEASURE_VORTICITY : MASKED_VELOCITY(MEASURE_VORTICITY); break;
// case MEASURE_LAMBDA2 : MASKED_VELOCITY(MEASURE_LAMBDA2); break;
// case MEASURE_QHUNT : MASKED_VELOCITY(MEASURE_QHUNT); break;
// case MEASURE_DELTACHONG : MASKED_VELOCITY(MEASURE_DELTACHONG); break;
// case MEASURE_ENSTROPHY_PRODUCTION : MASKED_VELOCITY(MEASURE_ENSTROPHY_PRODUCTION); break;
// case MEASURE_STRAIN_PRODUCTION : MASKED_VELOCITY(MEASURE_STRAIN_PRODUCTION); break;
// case MEASURE_SQUARE_ROTATION : MASKED_VELOCITY(MEASURE_SQUARE_ROTATION); break;
// case MEASURE_SQUARE_RATE_OF_STRAIN : MASKED_VELOCITY(MEASURE_SQUARE_RATE_OF_STRAIN); break;
// case MEASURE_TRACE_JJT : MASKED_VELOCITY(MEASURE_TRACE_JJT); break;
// case MEASURE_PVA : MASKED_VELOCITY(MEASURE_PVA); break;
// }
// #undef MASKED_VELOCITY
// cudaCheckMsg("maskedVelocityKernel execution failed");
//}
//void VolumeFilter::GetMaskedJacobian(const float* d_pDataX, const float* d_pDataY, const float* d_pDataZ, float* d_pOut0, float* d_pOut1, float* d_pOut2, float* d_pOut3, float* d_pOut4, float* d_pOut5, float* d_pOut6, float* d_pOut7, float* d_pOut8, const tum3D::Vec3i& size, eMeasure measure, float threshold)
//{
// dim3 blockSize(32, 4);
// dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.y() + blockSize.y - 1) / blockSize.y, (size.z() + blockSize.z - 1) / blockSize.z);
//
// #define MASKED_JACOBIAN(M) maskedJacobianKernel<M><<<blockCount, blockSize>>>(d_pDataX, d_pDataY, d_pDataZ, d_pOut0, d_pOut1, d_pOut2, d_pOut3, d_pOut4, d_pOut5, d_pOut6, d_pOut7, d_pOut8, size.x(), size.y(), size.z(), threshold)
// switch(measure)
// {
// case MEASURE_VELOCITY : MASKED_JACOBIAN(MEASURE_VELOCITY); break;
// case MEASURE_VORTICITY : MASKED_JACOBIAN(MEASURE_VORTICITY); break;
// case MEASURE_LAMBDA2 : MASKED_JACOBIAN(MEASURE_LAMBDA2); break;
// case MEASURE_QHUNT : MASKED_JACOBIAN(MEASURE_QHUNT); break;
// case MEASURE_DELTACHONG : MASKED_JACOBIAN(MEASURE_DELTACHONG); break;
// case MEASURE_ENSTROPHY_PRODUCTION : MASKED_JACOBIAN(MEASURE_ENSTROPHY_PRODUCTION); break;
// case MEASURE_STRAIN_PRODUCTION : MASKED_JACOBIAN(MEASURE_STRAIN_PRODUCTION); break;
// case MEASURE_SQUARE_ROTATION : MASKED_JACOBIAN(MEASURE_SQUARE_ROTATION); break;
// case MEASURE_SQUARE_RATE_OF_STRAIN : MASKED_JACOBIAN(MEASURE_SQUARE_RATE_OF_STRAIN); break;
// case MEASURE_TRACE_JJT : MASKED_JACOBIAN(MEASURE_TRACE_JJT); break;
// case MEASURE_PVA : MASKED_JACOBIAN(MEASURE_PVA); break;
// }
// #undef MASKED_JACOBIAN
// cudaCheckMsg("maskedVelocityKernel execution failed");
//}
void VolumeFilter::Filter(EFilterDirection dir, int radius, const ChannelData& data, const tum3D::Vec3ui& size, int overlap, int sizeLeft, int sizeRight)
{
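// For every direction the launch is 2-D: the 32x4 thread block and the grid cover the
// two axes orthogonal to the filter direction, while the radius, the 2*overlap margin
// and the neighbour bricks d_pLeft/d_pRight (sizeLeft/sizeRight) are forwarded to the
// kernels defined in VolumeFilterKernels.cuh.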
switch(dir)
{
case DIR_X:
{
dim3 blockSize(32, 4);
dim3 blockCount((size.y() + blockSize.x - 1) / blockSize.x, (size.z() + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( filterXKernel), dim3(blockCount), dim3(blockSize), 0, 0, radius, data.d_pData, data.d_pOut, size.x(), size.y(), size.z(), 2 * overlap, data.d_pLeft, sizeLeft, data.d_pRight, sizeRight);
cudaCheckMsg("filterXKernel execution failed");
break;
}
case DIR_Y:
{
dim3 blockSize(32, 4);
dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.z() + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( filterYKernel), dim3(blockCount), dim3(blockSize), 0, 0, radius, data.d_pData, data.d_pOut, size.x(), size.y(), size.z(), 2 * overlap, data.d_pLeft, sizeLeft, data.d_pRight, sizeRight);
cudaCheckMsg("filterYKernel execution failed");
break;
}
case DIR_Z:
{
dim3 blockSize(32, 4);
dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.y() + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( filterZKernel), dim3(blockCount), dim3(blockSize), 0, 0, radius, data.d_pData, data.d_pOut, size.x(), size.y(), size.z(), 2 * overlap, data.d_pLeft, sizeLeft, data.d_pRight, sizeRight);
cudaCheckMsg("filterZKernel execution failed");
break;
}
}
//hipError_t e = hipDeviceSynchronize();
//if(e != hipSuccess) printf("ERROR: %i\n", int(e));
}
| 778d7bf90c5350479c23d51c7e45e311ead0c371.cu | #include "VolumeFilter.h"
#include <cuda_runtime.h>
#include "cudaUtil.h"
// HACK to get stuff to compile...
extern texture<float4, cudaTextureType3D, cudaReadModeElementType> g_texVolume1;
#include "VolumeFilterKernels.cuh"
//void VolumeFilter::GetMaskedVelocity(const float* d_pDataX, const float* d_pDataY, const float* d_pDataZ, float* d_pOutX, float* d_pOutY, float* d_pOutZ, const tum3D::Vec3i& size, eMeasure measure, float threshold)
//{
// dim3 blockSize(32, 4);
// dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.y() + blockSize.y - 1) / blockSize.y, (size.z() + blockSize.z - 1) / blockSize.z);
//
// #define MASKED_VELOCITY(M) maskedVelocityKernel<M><<<blockCount, blockSize>>>(d_pDataX, d_pDataY, d_pDataZ, d_pOutX, d_pOutY, d_pOutZ, size.x(), size.y(), size.z(), threshold)
// switch(measure)
// {
// case MEASURE_VELOCITY : MASKED_VELOCITY(MEASURE_VELOCITY); break;
// case MEASURE_VORTICITY : MASKED_VELOCITY(MEASURE_VORTICITY); break;
// case MEASURE_LAMBDA2 : MASKED_VELOCITY(MEASURE_LAMBDA2); break;
// case MEASURE_QHUNT : MASKED_VELOCITY(MEASURE_QHUNT); break;
// case MEASURE_DELTACHONG : MASKED_VELOCITY(MEASURE_DELTACHONG); break;
// case MEASURE_ENSTROPHY_PRODUCTION : MASKED_VELOCITY(MEASURE_ENSTROPHY_PRODUCTION); break;
// case MEASURE_STRAIN_PRODUCTION : MASKED_VELOCITY(MEASURE_STRAIN_PRODUCTION); break;
// case MEASURE_SQUARE_ROTATION : MASKED_VELOCITY(MEASURE_SQUARE_ROTATION); break;
// case MEASURE_SQUARE_RATE_OF_STRAIN : MASKED_VELOCITY(MEASURE_SQUARE_RATE_OF_STRAIN); break;
// case MEASURE_TRACE_JJT : MASKED_VELOCITY(MEASURE_TRACE_JJT); break;
// case MEASURE_PVA : MASKED_VELOCITY(MEASURE_PVA); break;
// }
// #undef MASKED_VELOCITY
// cudaCheckMsg("maskedVelocityKernel execution failed");
//}
//void VolumeFilter::GetMaskedJacobian(const float* d_pDataX, const float* d_pDataY, const float* d_pDataZ, float* d_pOut0, float* d_pOut1, float* d_pOut2, float* d_pOut3, float* d_pOut4, float* d_pOut5, float* d_pOut6, float* d_pOut7, float* d_pOut8, const tum3D::Vec3i& size, eMeasure measure, float threshold)
//{
// dim3 blockSize(32, 4);
// dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.y() + blockSize.y - 1) / blockSize.y, (size.z() + blockSize.z - 1) / blockSize.z);
//
// #define MASKED_JACOBIAN(M) maskedJacobianKernel<M><<<blockCount, blockSize>>>(d_pDataX, d_pDataY, d_pDataZ, d_pOut0, d_pOut1, d_pOut2, d_pOut3, d_pOut4, d_pOut5, d_pOut6, d_pOut7, d_pOut8, size.x(), size.y(), size.z(), threshold)
// switch(measure)
// {
// case MEASURE_VELOCITY : MASKED_JACOBIAN(MEASURE_VELOCITY); break;
// case MEASURE_VORTICITY : MASKED_JACOBIAN(MEASURE_VORTICITY); break;
// case MEASURE_LAMBDA2 : MASKED_JACOBIAN(MEASURE_LAMBDA2); break;
// case MEASURE_QHUNT : MASKED_JACOBIAN(MEASURE_QHUNT); break;
// case MEASURE_DELTACHONG : MASKED_JACOBIAN(MEASURE_DELTACHONG); break;
// case MEASURE_ENSTROPHY_PRODUCTION : MASKED_JACOBIAN(MEASURE_ENSTROPHY_PRODUCTION); break;
// case MEASURE_STRAIN_PRODUCTION : MASKED_JACOBIAN(MEASURE_STRAIN_PRODUCTION); break;
// case MEASURE_SQUARE_ROTATION : MASKED_JACOBIAN(MEASURE_SQUARE_ROTATION); break;
// case MEASURE_SQUARE_RATE_OF_STRAIN : MASKED_JACOBIAN(MEASURE_SQUARE_RATE_OF_STRAIN); break;
// case MEASURE_TRACE_JJT : MASKED_JACOBIAN(MEASURE_TRACE_JJT); break;
// case MEASURE_PVA : MASKED_JACOBIAN(MEASURE_PVA); break;
// }
// #undef MASKED_JACOBIAN
// cudaCheckMsg("maskedVelocityKernel execution failed");
//}
void VolumeFilter::Filter(EFilterDirection dir, int radius, const ChannelData& data, const tum3D::Vec3ui& size, int overlap, int sizeLeft, int sizeRight)
{
switch(dir)
{
case DIR_X:
{
dim3 blockSize(32, 4);
dim3 blockCount((size.y() + blockSize.x - 1) / blockSize.x, (size.z() + blockSize.y - 1) / blockSize.y);
filterXKernel<<<blockCount, blockSize>>>(radius, data.d_pData, data.d_pOut, size.x(), size.y(), size.z(), 2 * overlap, data.d_pLeft, sizeLeft, data.d_pRight, sizeRight);
cudaCheckMsg("filterXKernel execution failed");
break;
}
case DIR_Y:
{
dim3 blockSize(32, 4);
dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.z() + blockSize.y - 1) / blockSize.y);
filterYKernel<<<blockCount, blockSize>>>(radius, data.d_pData, data.d_pOut, size.x(), size.y(), size.z(), 2 * overlap, data.d_pLeft, sizeLeft, data.d_pRight, sizeRight);
cudaCheckMsg("filterYKernel execution failed");
break;
}
case DIR_Z:
{
dim3 blockSize(32, 4);
dim3 blockCount((size.x() + blockSize.x - 1) / blockSize.x, (size.y() + blockSize.y - 1) / blockSize.y);
filterZKernel<<<blockCount, blockSize>>>(radius, data.d_pData, data.d_pOut, size.x(), size.y(), size.z(), 2 * overlap, data.d_pLeft, sizeLeft, data.d_pRight, sizeRight);
cudaCheckMsg("filterZKernel execution failed");
break;
}
}
//cudaError_t e = cudaDeviceSynchronize();
//if(e != cudaSuccess) printf("ERROR: %i\n", int(e));
}
|
87bd30420a446a2de619ea2e4b117385eadaf7b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/replace.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>
namespace { // anonymous
static constexpr int BLOCK_SIZE = 256;
// return the new_value for output column at index `idx`
template <class T, bool replacement_has_nulls>
__device__ auto get_new_value(cudf::size_type idx,
T const* __restrict__ input_data,
T const* __restrict__ values_to_replace_begin,
T const* __restrict__ values_to_replace_end,
T const* __restrict__ d_replacement_values,
cudf::bitmask_type const* __restrict__ replacement_valid)
{
auto found_ptr =
thrust::find(thrust::seq, values_to_replace_begin, values_to_replace_end, input_data[idx]);
T new_value{};
bool output_is_valid{true};
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) { output_is_valid = cudf::bit_is_set(replacement_valid, d); }
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
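// Example: input_data[idx] == 5, values_to_replace == {3, 5}, d_replacement_values == {30, 50}
// -> the value is found at distance 1, so (50, bit 1 of replacement_valid) is returned;
// a value that is not listed in values_to_replace is passed through unchanged.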
__device__ int get_new_string_value(cudf::size_type idx,
cudf::column_device_view& input,
cudf::column_device_view& values_to_replace,
cudf::column_device_view&)
{
cudf::string_view input_string = input.element<cudf::string_view>(idx);
int match = -1;
for (int i = 0; i < values_to_replace.size(); i++) {
cudf::string_view value_string = values_to_replace.element<cudf::string_view>(i);
if (input_string == value_string) {
match = i;
break;
}
}
return match;
}
/**
* @brief Kernel which does the first pass of strings replace.
*
* It computes the output null_mask, null_count, and the offsets.
*
* @param input The input column to replace strings in.
* @param values_to_replace The string values to replace.
* @param replacement The replacement values.
* @param offsets The column which will contain the offsets of the new string column
* @param indices Temporary column used to store the replacement indices
* @param output_valid The output null_mask
* @param output_valid_count The output valid count
*/
template <bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_first_pass(cudf::column_device_view input,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view indices,
cudf::bitmask_type* output_valid,
cudf::size_type* __restrict__ output_valid_count)
{
cudf::size_type nrows = input.size();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
uint32_t active_mask = 0xffff'ffffu;
active_mask = __ballot_sync(active_mask, tid < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (tid < nrows) {
auto const idx = static_cast<cudf::size_type>(tid);
bool input_is_valid = true;
if (input_has_nulls) input_is_valid = input.is_valid_nocheck(idx);
bool output_is_valid = input_is_valid;
if (input_is_valid) {
int result = get_new_string_value(idx, input, values_to_replace, replacement);
cudf::string_view output = (result == -1) ? input.element<cudf::string_view>(idx)
: replacement.element<cudf::string_view>(result);
offsets.data<cudf::size_type>()[idx] = output.size_bytes();
indices.data<cudf::size_type>()[idx] = result;
if (replacement_has_nulls && result != -1) {
output_is_valid = replacement.is_valid_nocheck(result);
}
} else {
offsets.data<cudf::size_type>()[idx] = 0;
indices.data<cudf::size_type>()[idx] = -1;
}
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(idx)] = bitmask;
valid_sum += __popc(bitmask);
}
tid += stride;
active_mask = __ballot_sync(active_mask, tid < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
/**
* @brief Kernel which does the second pass of strings replace.
*
* It copies the string data needed from input and replacement into the new strings column chars
* column.
*
* @param input The input column
* @param replacement The replacement values
* @param offsets The offsets column of the new strings column
* @param strings The chars column of the new strings column
* @param indices Temporary column used to store the replacement indices.
*/
template <bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_second_pass(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view strings,
cudf::mutable_column_device_view indices)
{
cudf::size_type nrows = input.size();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
while (tid < nrows) {
auto const idx = static_cast<cudf::size_type>(tid);
auto const replace_idx = indices.element<cudf::size_type>(idx);
bool output_is_valid = true;
bool input_is_valid = true;
if (input_has_nulls) {
input_is_valid = input.is_valid_nocheck(idx);
output_is_valid = input_is_valid;
}
if (replacement_has_nulls && replace_idx != -1) {
output_is_valid = replacement.is_valid_nocheck(replace_idx);
}
if (output_is_valid) {
cudf::string_view output = (replace_idx == -1)
? input.element<cudf::string_view>(idx)
: replacement.element<cudf::string_view>(replace_idx);
std::memcpy(strings.data<char>() + offsets.data<cudf::size_type>()[idx],
output.data(),
output.size_bytes());
}
tid += stride;
}
}
/**
* @brief Kernel that fills `output` from `input` using the following rule: every element
* of `input` that equals `values_to_replace[i]` is replaced with `replacement[i]`; all
* other elements are copied through unchanged.
*
* @tparam input_has_nulls `true` if the input column has a valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if the replacement column has a valid mask, `false`
* otherwise. The input_has_nulls and replacement_has_nulls template parameters allow us to
* specialize this kernel for the different scenarios for performance without writing
* different kernels.
*
* @param[in] input Device view of the column with the data to be modified
* @param[out] output Mutable device view receiving the replaced data and its valid mask
* @param[out] output_valid_count Number of valid elements in the output column
* @param[in] nrows Number of rows in `input`
* @param[in] values_to_replace Device view of the column of old values to be replaced
* @param[in] replacement Device view of the column with the new values
*/
template <class T, bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_kernel(cudf::column_device_view input,
cudf::mutable_column_device_view output,
cudf::size_type* __restrict__ output_valid_count,
cudf::size_type nrows,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement)
{
T* __restrict__ output_data = output.data<T>();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
uint32_t active_mask = 0xffff'ffffu;
active_mask = __ballot_sync(active_mask, tid < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (tid < nrows) {
auto const idx = static_cast<cudf::size_type>(tid);
bool output_is_valid{true};
bool input_is_valid{true};
if (input_has_nulls) {
input_is_valid = input.is_valid_nocheck(idx);
output_is_valid = input_is_valid;
}
if (input_is_valid)
thrust::tie(output_data[idx], output_is_valid) = get_new_value<T, replacement_has_nulls>(
idx,
input.data<T>(),
values_to_replace.data<T>(),
values_to_replace.data<T>() + values_to_replace.size(),
replacement.data<T>(),
replacement.null_mask());
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(idx), bitmask);
valid_sum += __popc(bitmask);
}
}
tid += stride;
active_mask = __ballot_sync(active_mask, tid < nrows);
}
if (input_has_nulls or replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count =
cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
}
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
struct replace_kernel_forwarder {
template <typename col_type, std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace = [&] {
if (input_col.has_nulls())
return replacement_values.has_nulls() ? replace_kernel<col_type, true, true>
: replace_kernel<col_type, true, false>;
else
return replacement_values.has_nulls() ? replace_kernel<col_type, false, true>
: replace_kernel<col_type, false, false>;
}();
auto output = [&] {
auto const mask_allocation_policy = input_col.has_nulls() || replacement_values.has_nulls()
? cudf::mask_allocation_policy::ALWAYS
: cudf::mask_allocation_policy::NEVER;
return cudf::detail::allocate_like(
input_col, input_col.size(), mask_allocation_policy, stream, mr);
}();
auto output_view = output->mutable_view();
auto grid = cudf::detail::grid_1d{output_view.size(), BLOCK_SIZE, 1};
auto device_in = cudf::column_device_view::create(input_col, stream);
auto device_out = cudf::mutable_column_device_view::create(output_view, stream);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace, stream);
auto device_replacement_values = cudf::column_device_view::create(replacement_values, stream);
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(), *device_in,
*device_out,
valid_count,
output_view.size(),
*device_values_to_replace,
*device_replacement_values);
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::column_view const&,
cudf::column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for this type");
}
};
template <>
std::unique_ptr<cudf::column> replace_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace_first = replace_strings_first_pass<true, false>;
auto replace_second = replace_strings_second_pass<true, false>;
if (input_col.has_nulls()) {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<true, true>;
replace_second = replace_strings_second_pass<true, true>;
}
} else {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<false, true>;
replace_second = replace_strings_second_pass<false, true>;
} else {
replace_first = replace_strings_first_pass<false, false>;
replace_second = replace_strings_second_pass<false, false>;
}
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes =
cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream);
std::unique_ptr<cudf::column> indices =
cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream);
auto sizes_view = sizes->mutable_view();
auto indices_view = indices->mutable_view();
auto device_in = cudf::column_device_view::create(input_col, stream);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace, stream);
auto device_replacement = cudf::column_device_view::create(replacement_values, stream);
auto device_sizes = cudf::mutable_column_device_view::create(sizes_view, stream);
auto device_indices = cudf::mutable_column_device_view::create(indices_view, stream);
rmm::device_buffer valid_bits =
cudf::detail::create_null_mask(input_col.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
// Call first pass kernel to get sizes in offsets
cudf::detail::grid_1d grid{input_col.size(), BLOCK_SIZE, 1};
hipLaunchKernelGGL(( replace_first), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in,
*device_values_to_replace,
*device_replacement,
*device_sizes,
*device_indices,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
valid_count);
auto [offsets, bytes] = cudf::detail::make_offsets_child_column(
sizes_view.begin<cudf::size_type>(), sizes_view.end<cudf::size_type>(), stream, mr);
auto offsets_view = offsets->mutable_view();
auto device_offsets = cudf::mutable_column_device_view::create(offsets_view, stream);
// Allocate chars array and output null mask
cudf::size_type null_count = input_col.size() - valid_counter.value(stream);
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(bytes, stream, mr);
auto output_chars_view = output_chars->mutable_view();
auto device_chars = cudf::mutable_column_device_view::create(output_chars_view, stream);
hipLaunchKernelGGL(( replace_second), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in, *device_replacement, *device_offsets, *device_chars, *device_indices);
return cudf::make_strings_column(input_col.size(),
std::move(offsets),
std::move(output_chars),
null_count,
std::move(valid_bits));
}
template <>
std::unique_ptr<cudf::column> replace_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto input = cudf::dictionary_column_view(input_col);
auto values = cudf::dictionary_column_view(values_to_replace);
auto replacements = cudf::dictionary_column_view(replacement_values);
auto matched_input = [&] {
auto new_keys = cudf::detail::concatenate(
std::vector<cudf::column_view>({values.keys(), replacements.keys()}),
stream,
rmm::mr::get_current_device_resource());
return cudf::dictionary::detail::add_keys(input, new_keys->view(), stream, mr);
}();
auto matched_view = cudf::dictionary_column_view(matched_input->view());
auto matched_values = cudf::dictionary::detail::set_keys(
values, matched_view.keys(), stream, rmm::mr::get_current_device_resource());
auto matched_replacements = cudf::dictionary::detail::set_keys(
replacements, matched_view.keys(), stream, rmm::mr::get_current_device_resource());
auto indices_type = matched_view.indices().type();
auto new_indices = cudf::type_dispatcher<cudf::dispatch_storage_type>(
indices_type,
replace_kernel_forwarder{},
matched_view.get_indices_annotated(),
cudf::dictionary_column_view(matched_values->view()).indices(),
cudf::dictionary_column_view(matched_replacements->view()).get_indices_annotated(),
stream,
mr);
auto null_count = new_indices->null_count();
auto contents = new_indices->release();
auto indices_column = std::make_unique<cudf::column>(
indices_type, input.size(), std::move(*(contents.data.release())), rmm::device_buffer{}, 0);
std::unique_ptr<cudf::column> keys_column(std::move(matched_input->release().children.back()));
return cudf::make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
} // end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(values_to_replace.size() == replacement_values.size(),
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(
input_col.type() == values_to_replace.type() && input_col.type() == replacement_values.type(),
"Columns type mismatch");
CUDF_EXPECTS(not values_to_replace.has_nulls(), "values_to_replace must not have nulls");
if (input_col.is_empty() or values_to_replace.is_empty() or replacement_values.is_empty()) {
return std::make_unique<cudf::column>(input_col, stream, mr);
}
return cudf::type_dispatcher<dispatch_storage_type>(input_col.type(),
replace_kernel_forwarder{},
input_col,
values_to_replace,
replacement_values,
stream,
mr);
}
} // namespace detail
/**
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
* @param[in] input_col column_view of the data to be modified
* @param[in] values_to_replace column_view of the old values to be replaced
* @param[in] replacement_values column_view of the new values
*
* @returns output cudf::column with the modified data
*/
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::find_and_replace_all(input_col, values_to_replace, replacement_values, stream, mr);
}
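// Usage sketch with assumed column contents (not taken from this file):
//   input = [10, 20, 30, 20], values_to_replace = [20], replacement_values = [99]
//   auto out = cudf::find_and_replace_all(input, values_to_replace, replacement_values, stream, mr);
//   -> out holds [10, 99, 30, 99]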
} // namespace cudf
| 87bd30420a446a2de619ea2e4b117385eadaf7b0.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/replace.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>
namespace { // anonymous
static constexpr int BLOCK_SIZE = 256;
// return the new_value for output column at index `idx`
template <class T, bool replacement_has_nulls>
__device__ auto get_new_value(cudf::size_type idx,
T const* __restrict__ input_data,
T const* __restrict__ values_to_replace_begin,
T const* __restrict__ values_to_replace_end,
T const* __restrict__ d_replacement_values,
cudf::bitmask_type const* __restrict__ replacement_valid)
{
auto found_ptr =
thrust::find(thrust::seq, values_to_replace_begin, values_to_replace_end, input_data[idx]);
T new_value{};
bool output_is_valid{true};
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) { output_is_valid = cudf::bit_is_set(replacement_valid, d); }
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
__device__ int get_new_string_value(cudf::size_type idx,
cudf::column_device_view& input,
cudf::column_device_view& values_to_replace,
cudf::column_device_view&)
{
cudf::string_view input_string = input.element<cudf::string_view>(idx);
int match = -1;
for (int i = 0; i < values_to_replace.size(); i++) {
cudf::string_view value_string = values_to_replace.element<cudf::string_view>(i);
if (input_string == value_string) {
match = i;
break;
}
}
return match;
}
/**
* @brief Kernel which does the first pass of strings replace.
*
* It computes the output null_mask, null_count, and the offsets.
*
* @param input The input column to replace strings in.
* @param values_to_replace The string values to replace.
* @param replacement The replacement values.
* @param offsets The column which will contain the offsets of the new string column
* @param indices Temporary column used to store the replacement indices
* @param output_valid The output null_mask
* @param output_valid_count The output valid count
*/
template <bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_first_pass(cudf::column_device_view input,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view indices,
cudf::bitmask_type* output_valid,
cudf::size_type* __restrict__ output_valid_count)
{
cudf::size_type nrows = input.size();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
uint32_t active_mask = 0xffff'ffffu;
active_mask = __ballot_sync(active_mask, tid < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (tid < nrows) {
auto const idx = static_cast<cudf::size_type>(tid);
bool input_is_valid = true;
if (input_has_nulls) input_is_valid = input.is_valid_nocheck(idx);
bool output_is_valid = input_is_valid;
if (input_is_valid) {
int result = get_new_string_value(idx, input, values_to_replace, replacement);
cudf::string_view output = (result == -1) ? input.element<cudf::string_view>(idx)
: replacement.element<cudf::string_view>(result);
offsets.data<cudf::size_type>()[idx] = output.size_bytes();
indices.data<cudf::size_type>()[idx] = result;
if (replacement_has_nulls && result != -1) {
output_is_valid = replacement.is_valid_nocheck(result);
}
} else {
offsets.data<cudf::size_type>()[idx] = 0;
indices.data<cudf::size_type>()[idx] = -1;
}
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(idx)] = bitmask;
valid_sum += __popc(bitmask);
}
tid += stride;
active_mask = __ballot_sync(active_mask, tid < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
/**
* @brief Kernel which does the second pass of strings replace.
*
* It copies the string data needed from input and replacement into the new strings column chars
* column.
*
* @param input The input column
* @param replacement The replacement values
* @param offsets The offsets column of the new strings column
* @param strings The chars column of the new strings column
* @param indices Temporary column used to store the replacement indices.
*/
template <bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_second_pass(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view strings,
cudf::mutable_column_device_view indices)
{
cudf::size_type nrows = input.size();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
while (tid < nrows) {
auto const idx = static_cast<cudf::size_type>(tid);
auto const replace_idx = indices.element<cudf::size_type>(idx);
bool output_is_valid = true;
bool input_is_valid = true;
if (input_has_nulls) {
input_is_valid = input.is_valid_nocheck(idx);
output_is_valid = input_is_valid;
}
if (replacement_has_nulls && replace_idx != -1) {
output_is_valid = replacement.is_valid_nocheck(replace_idx);
}
if (output_is_valid) {
cudf::string_view output = (replace_idx == -1)
? input.element<cudf::string_view>(idx)
: replacement.element<cudf::string_view>(replace_idx);
std::memcpy(strings.data<char>() + offsets.data<cudf::size_type>()[idx],
output.data(),
output.size_bytes());
}
tid += stride;
}
}
/**
* @brief Kernel that fills `output` from `input` using the following rule: every element
* of `input` that equals `values_to_replace[i]` is replaced with `replacement[i]`; all
* other elements are copied through unchanged.
*
* @tparam input_has_nulls `true` if the input column has a valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if the replacement column has a valid mask, `false`
* otherwise. The input_has_nulls and replacement_has_nulls template parameters allow us to
* specialize this kernel for the different scenarios for performance without writing
* different kernels.
*
* @param[in] input Device view of the column with the data to be modified
* @param[out] output Mutable device view receiving the replaced data and its valid mask
* @param[out] output_valid_count Number of valid elements in the output column
* @param[in] nrows Number of rows in `input`
* @param[in] values_to_replace Device view of the column of old values to be replaced
* @param[in] replacement Device view of the column with the new values
*/
template <class T, bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_kernel(cudf::column_device_view input,
cudf::mutable_column_device_view output,
cudf::size_type* __restrict__ output_valid_count,
cudf::size_type nrows,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement)
{
T* __restrict__ output_data = output.data<T>();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
uint32_t active_mask = 0xffff'ffffu;
active_mask = __ballot_sync(active_mask, tid < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (tid < nrows) {
auto const idx = static_cast<cudf::size_type>(tid);
bool output_is_valid{true};
bool input_is_valid{true};
if (input_has_nulls) {
input_is_valid = input.is_valid_nocheck(idx);
output_is_valid = input_is_valid;
}
if (input_is_valid)
thrust::tie(output_data[idx], output_is_valid) = get_new_value<T, replacement_has_nulls>(
idx,
input.data<T>(),
values_to_replace.data<T>(),
values_to_replace.data<T>() + values_to_replace.size(),
replacement.data<T>(),
replacement.null_mask());
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(idx), bitmask);
valid_sum += __popc(bitmask);
}
}
tid += stride;
active_mask = __ballot_sync(active_mask, tid < nrows);
}
if (input_has_nulls or replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count =
cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
}
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
struct replace_kernel_forwarder {
template <typename col_type, std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace = [&] {
if (input_col.has_nulls())
return replacement_values.has_nulls() ? replace_kernel<col_type, true, true>
: replace_kernel<col_type, true, false>;
else
return replacement_values.has_nulls() ? replace_kernel<col_type, false, true>
: replace_kernel<col_type, false, false>;
}();
auto output = [&] {
auto const mask_allocation_policy = input_col.has_nulls() || replacement_values.has_nulls()
? cudf::mask_allocation_policy::ALWAYS
: cudf::mask_allocation_policy::NEVER;
return cudf::detail::allocate_like(
input_col, input_col.size(), mask_allocation_policy, stream, mr);
}();
auto output_view = output->mutable_view();
auto grid = cudf::detail::grid_1d{output_view.size(), BLOCK_SIZE, 1};
auto device_in = cudf::column_device_view::create(input_col, stream);
auto device_out = cudf::mutable_column_device_view::create(output_view, stream);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace, stream);
auto device_replacement_values = cudf::column_device_view::create(replacement_values, stream);
replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(*device_in,
*device_out,
valid_count,
output_view.size(),
*device_values_to_replace,
*device_replacement_values);
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::column_view const&,
cudf::column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for this type");
}
};
template <>
std::unique_ptr<cudf::column> replace_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
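  // Strings are replaced in two passes: the first kernel records each output row's byte
  // size, validity and chosen replacement index, offsets are then built from those sizes,
  // and the second kernel copies the selected string bytes into the chars column.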
auto replace_first = replace_strings_first_pass<true, false>;
auto replace_second = replace_strings_second_pass<true, false>;
if (input_col.has_nulls()) {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<true, true>;
replace_second = replace_strings_second_pass<true, true>;
}
} else {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<false, true>;
replace_second = replace_strings_second_pass<false, true>;
} else {
replace_first = replace_strings_first_pass<false, false>;
replace_second = replace_strings_second_pass<false, false>;
}
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes =
cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream);
std::unique_ptr<cudf::column> indices =
cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream);
auto sizes_view = sizes->mutable_view();
auto indices_view = indices->mutable_view();
auto device_in = cudf::column_device_view::create(input_col, stream);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace, stream);
auto device_replacement = cudf::column_device_view::create(replacement_values, stream);
auto device_sizes = cudf::mutable_column_device_view::create(sizes_view, stream);
auto device_indices = cudf::mutable_column_device_view::create(indices_view, stream);
rmm::device_buffer valid_bits =
cudf::detail::create_null_mask(input_col.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
// Call first pass kernel to get sizes in offsets
cudf::detail::grid_1d grid{input_col.size(), BLOCK_SIZE, 1};
replace_first<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in,
*device_values_to_replace,
*device_replacement,
*device_sizes,
*device_indices,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
valid_count);
auto [offsets, bytes] = cudf::detail::make_offsets_child_column(
sizes_view.begin<cudf::size_type>(), sizes_view.end<cudf::size_type>(), stream, mr);
auto offsets_view = offsets->mutable_view();
auto device_offsets = cudf::mutable_column_device_view::create(offsets_view, stream);
// Allocate chars array and output null mask
cudf::size_type null_count = input_col.size() - valid_counter.value(stream);
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(bytes, stream, mr);
auto output_chars_view = output_chars->mutable_view();
auto device_chars = cudf::mutable_column_device_view::create(output_chars_view, stream);
replace_second<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in, *device_replacement, *device_offsets, *device_chars, *device_indices);
return cudf::make_strings_column(input_col.size(),
std::move(offsets),
std::move(output_chars),
null_count,
std::move(valid_bits));
}
template <>
std::unique_ptr<cudf::column> replace_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
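  // For dictionaries the replacement runs on the indices: the keys of the values and
  // replacement dictionaries are merged into the input's key set, all three columns are
  // re-expressed over the matched keys, and the fixed-width index columns then go through
  // the regular replace_kernel path before the dictionary column is reassembled.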
auto input = cudf::dictionary_column_view(input_col);
auto values = cudf::dictionary_column_view(values_to_replace);
auto replacements = cudf::dictionary_column_view(replacement_values);
auto matched_input = [&] {
auto new_keys = cudf::detail::concatenate(
std::vector<cudf::column_view>({values.keys(), replacements.keys()}),
stream,
rmm::mr::get_current_device_resource());
return cudf::dictionary::detail::add_keys(input, new_keys->view(), stream, mr);
}();
auto matched_view = cudf::dictionary_column_view(matched_input->view());
auto matched_values = cudf::dictionary::detail::set_keys(
values, matched_view.keys(), stream, rmm::mr::get_current_device_resource());
auto matched_replacements = cudf::dictionary::detail::set_keys(
replacements, matched_view.keys(), stream, rmm::mr::get_current_device_resource());
auto indices_type = matched_view.indices().type();
auto new_indices = cudf::type_dispatcher<cudf::dispatch_storage_type>(
indices_type,
replace_kernel_forwarder{},
matched_view.get_indices_annotated(),
cudf::dictionary_column_view(matched_values->view()).indices(),
cudf::dictionary_column_view(matched_replacements->view()).get_indices_annotated(),
stream,
mr);
auto null_count = new_indices->null_count();
auto contents = new_indices->release();
auto indices_column = std::make_unique<cudf::column>(
indices_type, input.size(), std::move(*(contents.data.release())), rmm::device_buffer{}, 0);
std::unique_ptr<cudf::column> keys_column(std::move(matched_input->release().children.back()));
return cudf::make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
} // end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(values_to_replace.size() == replacement_values.size(),
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(
input_col.type() == values_to_replace.type() && input_col.type() == replacement_values.type(),
"Columns type mismatch");
CUDF_EXPECTS(not values_to_replace.has_nulls(), "values_to_replace must not have nulls");
if (input_col.is_empty() or values_to_replace.is_empty() or replacement_values.is_empty()) {
return std::make_unique<cudf::column>(input_col, stream, mr);
}
return cudf::type_dispatcher<dispatch_storage_type>(input_col.type(),
replace_kernel_forwarder{},
input_col,
values_to_replace,
replacement_values,
stream,
mr);
}
} // namespace detail
/**
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
* @param[in] input_col column_view of the data to be modified
* @param[in] values_to_replace column_view of the old values to be replaced
* @param[in] replacement_values column_view of the new values
 * @param[in] stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate the returned column's device memory
 *
* @returns output cudf::column with the modified data
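 *
 * For example (illustrative values only): with `input_col = [4, 5, 6, 5]`,
 * `values_to_replace = [5, 6]` and `replacement_values = [0, 1]`, the returned column
 * is `[4, 0, 1, 0]`.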
*/
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::find_and_replace_all(input_col, values_to_replace, replacement_values, stream, mr);
}
} // namespace cudf
|
afef8019166504dd5b99aed76dee584b5e6ef360.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
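// Grid-stride loop: each thread starts at its global index and advances by the total
// number of launched threads, so a fixed-size grid covers any problem size n.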
// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
// and PyTorch ROIAlign (non-rotated) Op implementations.
// The key difference between this implementation and those ones is
// we don't do "legacy offset" in this version, as there aren't many previous
// works, if any, using the "legacy" ROIAlignRotated Op.
// This would make the interface a bit cleaner.
namespace cvpods {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
const int height,
const int width,
T y,
T x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
} // namespace
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
    // We do average (integral) pooling inside a bin
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T val = bilinear_interpolate(offset_input, height, width, y, x);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__global__ void RoIAlignRotatedBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
at::Tensor ROIAlignRotated_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlignRotated_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
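  // Launch at most 4096 blocks of 512 threads; the grid-stride CUDA_1D_KERNEL_LOOP in the
  // kernel picks up any elements beyond what the grid covers directly.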
dim3 grid(::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "ROIAlignRotated_forward", [&] {
hipLaunchKernelGGL(( RoIAlignRotatedForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
hipDeviceSynchronize();
AT_CUDA_CHECK(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use its sizes instead -> save memory
at::Tensor ROIAlignRotated_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(
grad.scalar_type(), "ROIAlignRotated_backward", [&] {
hipLaunchKernelGGL(( RoIAlignRotatedBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace cvpods
| afef8019166504dd5b99aed76dee584b5e6ef360.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
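// Grid-stride loop: each thread starts at its global index and advances by the total
// number of launched threads, so a fixed-size grid covers any problem size n.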
// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
// and PyTorch ROIAlign (non-rotated) Op implementations.
// The key difference between this implementation and those ones is
// we don't do "legacy offset" in this version, as there aren't many previous
// works, if any, using the "legacy" ROIAlignRotated Op.
// This would make the interface a bit cleaner.
namespace cvpods {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
const int height,
const int width,
T y,
T x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
} // namespace
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
    // We do average (integral) pooling inside a bin
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T val = bilinear_interpolate(offset_input, height, width, y, x);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__global__ void RoIAlignRotatedBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
at::Tensor ROIAlignRotated_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlignRotated_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
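  // Launch at most 4096 blocks of 512 threads; the grid-stride CUDA_1D_KERNEL_LOOP in the
  // kernel picks up any elements beyond what the grid covers directly.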
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "ROIAlignRotated_forward", [&] {
RoIAlignRotatedForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
cudaDeviceSynchronize();
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use its sizes instead -> save memory
at::Tensor ROIAlignRotated_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(
grad.scalar_type(), "ROIAlignRotated_backward", [&] {
RoIAlignRotatedBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace cvpods
|
b2c98993b112624e6901414fdef2930470a6dc51.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <glew.h>
#include <freeglut.h>
#include <cudaDefs.h>
#include <imageManager.h>
// includes, cuda
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include "imageKernels.cuh"
namespace lesson6 {
#define BLOCK_DIM 8
hipError_t error = hipSuccess;
//CUDA variables
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int imagePitch;
cudaGraphicsResource_t cudaPBOResource;
cudaGraphicsResource_t cudaTexResource;
texture<uchar4, 2, hipReadModeElementType> cudaTexRef;
hipChannelFormatDesc cudaTexChannelDesc;
KernelSetting ks;
unsigned char someValue = 0;
//OpenGL
unsigned int pboID;
unsigned int textureID;
unsigned int viewportWidth = 1024;
unsigned int viewportHeight = 1024;
#pragma region CUDA Routines
__global__ void applyFilter(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo)
{
//TODO 9: Create a filter that replaces Red spectrum of RGBA pbo such that RED=someValue
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y *blockDim.y + threadIdx.y;
if (tx >= pboWidth || ty >= pboHeight) return;
int offset = ty * pboWidth * 4 + tx * 4;
uchar4 v = tex2D(cudaTexRef, tx, ty);
pbo[offset] = v.x;
pbo[offset + 1] = v.y;
pbo[offset + 2] = v.z;
pbo[offset + 3] = v.w;
pbo[offset] = someValue;
}
void cudaWorker()
{
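	//Overall flow: map the registered OpenGL texture and PBO for CUDA, bind the texture
	//array to cudaTexRef, let the kernel write the processed image into the PBO, then
	//unmap everything so the PBO contents can be copied back into the OpenGL texture.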
//TODO 3: Map cudaTexResource
hipGraphicsMapResources(1, &cudaTexResource, 0);
checkError();
//TODO 4: Get Mapped Array of cudaTexResource
hipArray* array;
hipGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0);
checkError();
//TODO 5: Get cudaTexChannelDesc from previously obtained array
hipGetChannelDesc(&cudaTexChannelDesc, array);
checkError();
//TODO 6: Bind cudaTexRef to array
hipBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc);
checkError();
//TODO 7: Map cudaPBOResource
hipGraphicsMapResources(1, &cudaPBOResource, 0);
checkError();
//TODO 7: Map Mapped pointer to cudaPBOResource data
unsigned char *pboData;
size_t pboSize;
hipGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource);
checkError();
//TODO 8: Set KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM);
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimGrid = dim3(getNumberOfParts(imageWidth, BLOCK_DIM), getNumberOfParts(imageHeight, BLOCK_DIM));
	//Calling applyFilter kernel
someValue++;
if (someValue > 255) someValue = 0;
applyFilter << <ks.dimGrid, ks.dimBlock >> > (someValue, imageWidth, imageHeight, pboData);
	//The following code releases the mapped resources, unbinds the texture and ensures that the PBO data will be copied into the OpenGL texture. Do not modify the following code!
hipUnbindTexture(&cudaTexRef);
hipGraphicsUnmapResources(1, &cudaPBOResource, 0);
hipGraphicsUnmapResources(1, &cudaTexResource, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
}
void initCUDAtex()
{
hipGLSetGLDevice(0);
checkError();
//CUDA Texture settings
cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates
cudaTexRef.filterMode = hipFilterModePoint; //Otherwise texRef.filterMode = hipFilterModeLinear; for Linear interpolation of texels
cudaTexRef.addressMode[0] = hipAddressModeClamp; //No repeat texture pattern
cudaTexRef.addressMode[1] = hipAddressModeClamp; //No repeat texture pattern
//TODO 1: Register OpenGL texture to CUDA resource
hipGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, hipGraphicsMapFlags::hipGraphicsMapFlagsReadOnly);
checkError();
//TODO 2: Register PBO to CUDA resource
hipGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, hipGraphicsRegisterFlags::hipGraphicsRegisterFlagsWriteDiscard);
checkError();
}
void releaseCUDA()
{
hipGraphicsUnregisterResource(cudaPBOResource);
hipGraphicsUnregisterResource(cudaTexResource);
}
#pragma endregion
#pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!!
void loadTexture(const char* imageFileName)
{
FreeImage_Initialise();
FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0);
imageWidth = FreeImage_GetWidth(tmp);
imageHeight = FreeImage_GetHeight(tmp);
imageBPP = FreeImage_GetBPP(tmp);
imagePitch = FreeImage_GetPitch(tmp);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
	//WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
FreeImage_Unload(tmp);
}
void preparePBO()
{
glGenBuffers(1, &pboID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, NULL, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
}
void my_display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textureID);
//I know this is a very old OpenGL, but we want to practice CUDA :-)
	//Now it would be a waste of time to teach you the current features of OpenGL. Sorry for that; however, you can visit my second seminar dealing with Computer Graphics (CG2).
glBegin(GL_QUADS);
glTexCoord2d(0, 0); glVertex2d(0, 0);
glTexCoord2d(1, 0); glVertex2d(viewportWidth, 0);
glTexCoord2d(1, 1); glVertex2d(viewportWidth, viewportHeight);
glTexCoord2d(0, 1); glVertex2d(0, viewportHeight);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h)
{
viewportWidth = w;
viewportHeight = h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, viewportWidth, viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, viewportWidth, 0, viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle()
{
cudaWorker();
glutPostRedisplay();
}
void initGL(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewportWidth, viewportHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow(":-)");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, viewportWidth, viewportHeight);
glFlush();
}
void releaseOpenGL()
{
if (textureID > 0)
glDeleteTextures(1, &textureID);
if (pboID > 0)
glDeleteBuffers(1, &pboID);
}
#pragma endregion
void releaseResources()
{
releaseCUDA();
releaseOpenGL();
}
void run(int argc, char *argv[])
{
initGL(argc, argv);
loadTexture("E:/Programming/School/PA II/src/TemplateProject2017/TemplateProject/lena.png");
preparePBO();
initCUDAtex();
//start rendering mainloop
glutMainLoop();
atexit(releaseResources);
}
} | b2c98993b112624e6901414fdef2930470a6dc51.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <glew.h>
#include <freeglut.h>
#include <cudaDefs.h>
#include <imageManager.h>
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include "imageKernels.cuh"
namespace lesson6 {
#define BLOCK_DIM 8
cudaError_t error = cudaSuccess;
//CUDA variables
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int imagePitch;
cudaGraphicsResource_t cudaPBOResource;
cudaGraphicsResource_t cudaTexResource;
texture<uchar4, 2, cudaReadModeElementType> cudaTexRef;
cudaChannelFormatDesc cudaTexChannelDesc;
KernelSetting ks;
unsigned char someValue = 0;
//OpenGL
unsigned int pboID;
unsigned int textureID;
unsigned int viewportWidth = 1024;
unsigned int viewportHeight = 1024;
#pragma region CUDA Routines
__global__ void applyFilter(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo)
{
//TODO 9: Create a filter that replaces Red spectrum of RGBA pbo such that RED=someValue
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y *blockDim.y + threadIdx.y;
if (tx >= pboWidth || ty >= pboHeight) return;
int offset = ty * pboWidth * 4 + tx * 4;
uchar4 v = tex2D(cudaTexRef, tx, ty);
pbo[offset] = v.x;
pbo[offset + 1] = v.y;
pbo[offset + 2] = v.z;
pbo[offset + 3] = v.w;
pbo[offset] = someValue;
}
void cudaWorker()
{
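	//Overall flow: map the registered OpenGL texture and PBO for CUDA, bind the texture
	//array to cudaTexRef, let the kernel write the processed image into the PBO, then
	//unmap everything so the PBO contents can be copied back into the OpenGL texture.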
//TODO 3: Map cudaTexResource
cudaGraphicsMapResources(1, &cudaTexResource, 0);
checkError();
//TODO 4: Get Mapped Array of cudaTexResource
cudaArray* array;
cudaGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0);
checkError();
//TODO 5: Get cudaTexChannelDesc from previously obtained array
cudaGetChannelDesc(&cudaTexChannelDesc, array);
checkError();
//TODO 6: Bind cudaTexRef to array
cudaBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc);
checkError();
//TODO 7: Map cudaPBOResource
cudaGraphicsMapResources(1, &cudaPBOResource, 0);
checkError();
//TODO 7: Map Mapped pointer to cudaPBOResource data
unsigned char *pboData;
size_t pboSize;
cudaGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource);
checkError();
//TODO 8: Set KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM);
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimGrid = dim3(getNumberOfParts(imageWidth, BLOCK_DIM), getNumberOfParts(imageHeight, BLOCK_DIM));
	//Calling applyFilter kernel
someValue++;
if (someValue > 255) someValue = 0;
applyFilter << <ks.dimGrid, ks.dimBlock >> > (someValue, imageWidth, imageHeight, pboData);
	//The following code releases the mapped resources, unbinds the texture and ensures that the PBO data will be copied into the OpenGL texture. Do not modify the following code!
cudaUnbindTexture(&cudaTexRef);
cudaGraphicsUnmapResources(1, &cudaPBOResource, 0);
cudaGraphicsUnmapResources(1, &cudaTexResource, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
}
void initCUDAtex()
{
cudaGLSetGLDevice(0);
checkError();
//CUDA Texture settings
cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates
cudaTexRef.filterMode = cudaFilterModePoint; //Otherwise texRef.filterMode = cudaFilterModeLinear; for Linear interpolation of texels
cudaTexRef.addressMode[0] = cudaAddressModeClamp; //No repeat texture pattern
cudaTexRef.addressMode[1] = cudaAddressModeClamp; //No repeat texture pattern
//TODO 1: Register OpenGL texture to CUDA resource
cudaGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, cudaGraphicsMapFlags::cudaGraphicsMapFlagsReadOnly);
checkError();
//TODO 2: Register PBO to CUDA resource
cudaGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, cudaGraphicsRegisterFlags::cudaGraphicsRegisterFlagsWriteDiscard);
checkError();
}
void releaseCUDA()
{
cudaGraphicsUnregisterResource(cudaPBOResource);
cudaGraphicsUnregisterResource(cudaTexResource);
}
#pragma endregion
#pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!!
void loadTexture(const char* imageFileName)
{
FreeImage_Initialise();
FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0);
imageWidth = FreeImage_GetWidth(tmp);
imageHeight = FreeImage_GetHeight(tmp);
imageBPP = FreeImage_GetBPP(tmp);
imagePitch = FreeImage_GetPitch(tmp);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
	//WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
FreeImage_Unload(tmp);
}
void preparePBO()
{
glGenBuffers(1, &pboID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, NULL, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
}
void my_display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textureID);
//I know this is a very old OpenGL, but we want to practice CUDA :-)
	//Now it would be a waste of time to teach you the current features of OpenGL. Sorry for that; however, you can visit my second seminar dealing with Computer Graphics (CG2).
glBegin(GL_QUADS);
glTexCoord2d(0, 0); glVertex2d(0, 0);
glTexCoord2d(1, 0); glVertex2d(viewportWidth, 0);
glTexCoord2d(1, 1); glVertex2d(viewportWidth, viewportHeight);
glTexCoord2d(0, 1); glVertex2d(0, viewportHeight);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h)
{
viewportWidth = w;
viewportHeight = h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, viewportWidth, viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, viewportWidth, 0, viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle()
{
cudaWorker();
glutPostRedisplay();
}
void initGL(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewportWidth, viewportHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow(":-)");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, viewportWidth, viewportHeight);
glFlush();
}
void releaseOpenGL()
{
if (textureID > 0)
glDeleteTextures(1, &textureID);
if (pboID > 0)
glDeleteBuffers(1, &pboID);
}
#pragma endregion
void releaseResources()
{
releaseCUDA();
releaseOpenGL();
}
void run(int argc, char *argv[])
{
initGL(argc, argv);
loadTexture("E:/Programming/School/PA II/src/TemplateProject2017/TemplateProject/lena.png");
preparePBO();
initCUDAtex();
//start rendering mainloop
glutMainLoop();
atexit(releaseResources);
}
} |
f362c1eaded6a0091909a29808b1d4ef0014fc0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/relu_der_out.hpp"
template <typename T>
__global__ void relu_der_out_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = x[incx * index] > T(0) ? alpha : T(0);
}
}
template <typename T>
__global__ void relu_der_out_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = x[incx * index] > T(0) ? T(1) : T(0);
}
}
template <typename T>
__global__ void relu_der_out_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <typename T>
void relu_der_out_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, relu_der_out_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( relu_der_out_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, alpha, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
template <typename T>
void relu_der_out_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, relu_der_out_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( relu_der_out_kernel1<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
template <typename T>
void relu_der_out_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, relu_der_out_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( relu_der_out_kernel0<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
void egblas_srelu_der_out(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
relu_der_out_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0f) {
relu_der_out_kernel0_run(n, y, incy);
} else {
relu_der_out_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_drelu_der_out(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
relu_der_out_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0) {
relu_der_out_kernel0_run(n, y, incy);
} else {
relu_der_out_kernel_run(n, alpha, x, incx, y, incy);
}
}
| f362c1eaded6a0091909a29808b1d4ef0014fc0b.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/relu_der_out.hpp"
template <typename T>
__global__ void relu_der_out_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = x[incx * index] > T(0) ? alpha : T(0);
}
}
template <typename T>
__global__ void relu_der_out_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = x[incx * index] > T(0) ? T(1) : T(0);
}
}
template <typename T>
__global__ void relu_der_out_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <typename T>
void relu_der_out_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, relu_der_out_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
relu_der_out_kernel<T><<<gridSize, blockSize>>>(n, alpha, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
template <typename T>
void relu_der_out_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, relu_der_out_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
relu_der_out_kernel1<T><<<gridSize, blockSize>>>(n, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
template <typename T>
void relu_der_out_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, relu_der_out_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
relu_der_out_kernel0<T><<<gridSize, blockSize>>>(n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
void egblas_srelu_der_out(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
relu_der_out_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0f) {
relu_der_out_kernel0_run(n, y, incy);
} else {
relu_der_out_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_drelu_der_out(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
relu_der_out_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0) {
relu_der_out_kernel0_run(n, y, incy);
} else {
relu_der_out_kernel_run(n, alpha, x, incx, y, incy);
}
}
|
21ef748c949e7c7cada043cc6472d4b09185a41f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5 * powf(variance[filter] + .000001f, (float)(-3./2.));
}
} | 21ef748c949e7c7cada043cc6472d4b09185a41f.cu | #include "includes.h"
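// Computes the batch-norm variance gradient, one filter per block:
// variance_delta[f] = -0.5 * (variance[f] + 1e-6)^(-3/2) * sum over batch and spatial of delta * (x - mean[f]).
// Each of the BLOCK threads accumulates a partial sum in shared memory and thread 0
// finishes the reduction serially.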
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5 * powf(variance[filter] + .000001f, (float)(-3./2.));
}
} |
cbbe20b5411ff459d4eccbfd5eb9fc491ef29ad7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CudaPermuteCudnnToPV( float *dest, float *src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY) {
// parameter dimensions are in dest PV format
int srcNx = nx / manyScaleX;
int srcNy = ny / manyScaleY;
int srcInFeatures = inFeatures * manyScaleX * manyScaleY;
int kDest = (blockIdx.x * blockDim.x) + threadIdx.x;
if (kDest < outFeatures * ny * nx * inFeatures) {
int kOF = kDest / (ny * nx * inFeatures);
int kY = (kDest % (ny * nx * inFeatures)) / (nx * inFeatures);
int kX = (kDest % (nx * inFeatures)) / inFeatures;
int kIF = (kDest % inFeatures);
// Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX / manyScaleX;
kY = kY / manyScaleY;
int sOF = srcInFeatures * srcNy * srcNx;
int sIF = srcNy * srcNx;
int sY = srcNx;
int kSrc = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
} | cbbe20b5411ff459d4eccbfd5eb9fc491ef29ad7.cu | #include "includes.h"
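// One thread per destination element: decompose the PV-layout index into (outFeature, y, x, inFeature), fold the manyScaleX/Y tiling back into the source feature index, and gather from the cuDNN-layout source.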
__global__ void CudaPermuteCudnnToPV( float *dest, float *src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY) {
// parameter dimensions are in dest PV format
int srcNx = nx / manyScaleX;
int srcNy = ny / manyScaleY;
int srcInFeatures = inFeatures * manyScaleX * manyScaleY;
int kDest = (blockIdx.x * blockDim.x) + threadIdx.x;
if (kDest < outFeatures * ny * nx * inFeatures) {
int kOF = kDest / (ny * nx * inFeatures);
int kY = (kDest % (ny * nx * inFeatures)) / (nx * inFeatures);
int kX = (kDest % (nx * inFeatures)) / inFeatures;
int kIF = (kDest % inFeatures);
// Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX / manyScaleX;
kY = kY / manyScaleY;
int sOF = srcInFeatures * srcNy * srcNx;
int sIF = srcNy * srcNx;
int sY = srcNx;
int kSrc = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
} |
bc6e59fcae45b316da128570bba9ac2fbac8008d.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| bc6e59fcae45b316da128570bba9ac2fbac8008d.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
317b4cfbdef8949f61c2855f12ae6a9d77e750de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _BICUBICTEXTURE_CU_
#define _BICUBICTEXTURE_CU_
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <helper_math.h>
// includes, cuda
#include <helper_cuda.h>
#include "transform_kernel.cuh"
#include "transform_common.h"
hipArray *d_imageArray = 0;
extern "C"
void initTexture(int imageWidth, int imageHeight, uchar *h_data)
{
// allocate array and copy image data
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
checkCudaErrors(hipMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight));
uint size = imageWidth * imageHeight * sizeof(uchar);
checkCudaErrors(hipMemcpyToArray(d_imageArray, 0, 0, h_data, size, hipMemcpyHostToDevice));
free(h_data);
// set texture parameters
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
tex.filterMode = hipFilterModeLinear;
tex.normalized = false; // access with integer texture coordinates
getLastCudaError("initTexture");
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(tex, d_imageArray));
// bind same array to 2nd texture reference with point sampling
tex2.addressMode[0] = hipAddressModeClamp;
tex2.addressMode[1] = hipAddressModeClamp;
tex2.filterMode = hipFilterModePoint;
tex2.normalized = false; // access with integer texture coordinates
checkCudaErrors(hipBindTextureToArray(tex2, d_imageArray));
}
extern "C"
void unbindTexture()
{
checkCudaErrors(hipUnbindTexture(tex));
checkCudaErrors(hipUnbindTexture(tex2));
}
extern "C"
void freeTexture()
{
checkCudaErrors(hipFreeArray(d_imageArray));
}
extern "C"
void initConstant(double *h_srcGeoTransform, double *h_srcToWGS84, double *h_srcDatum,
double *h_dstGeoTransform, double *h_dstToWGS84, double *h_dstDatum)
{
hipMemcpyToSymbol(c_srcGeoTransform, h_srcGeoTransform, 6 * sizeof(double));
hipMemcpyToSymbol(c_srcToWGS84, h_srcToWGS84, 7 * sizeof(double));
hipMemcpyToSymbol(c_srcDatum, h_srcDatum, 4 * sizeof(double));
hipMemcpyToSymbol(c_dstGeoTransform, h_dstGeoTransform, 6 * sizeof(double));
hipMemcpyToSymbol(c_dstToWGS84, h_dstToWGS84, 7 * sizeof(double));
hipMemcpyToSymbol(c_dstDatum, h_dstDatum, 4 * sizeof(double));
}
// transform coordinate without projection using CUDA
extern "C"
void transformGPU(int width, int height, int2 *coord,
dim3 blockSize, dim3 gridSize)
{
hipLaunchKernelGGL(( transformNoProjGPU), dim3(gridSize), dim3(blockSize), 0, 0, coord, width, height);
getLastCudaError("kernel failed");
}
extern "C"
void transformGPUTest(int width, int height, int2 *coord,
dim3 blockSize, dim3 gridSize)
{
hipLaunchKernelGGL(( transformTest), dim3(gridSize), dim3(blockSize), 0, 0, coord, width, height);
getLastCudaError("kernel failed");
}
// render image using CUDA
extern "C"
void render(int width, int height, float tx, float ty, float scale, float cx, float cy,
dim3 blockSize, dim3 gridSize, int filter_mode, uchar *output, int2 *coord)
{
// call CUDA kernel, writing results to PBO memory
switch (filter_mode)
{
case MODE_NEAREST:
tex.filterMode = hipFilterModePoint;
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, coord, width, height, tx, ty, scale, cx, cy);
break;
case MODE_BILINEAR:
tex.filterMode = hipFilterModeLinear;
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, coord, width, height, tx, ty, scale, cx, cy);
break;
case MODE_BICUBIC:
tex.filterMode = hipFilterModePoint;
hipLaunchKernelGGL(( d_renderBicubic), dim3(gridSize), dim3(blockSize), 0, 0, output, coord, width, height, tx, ty, scale, cx, cy);
break;
case MODE_FAST_BICUBIC:
tex.filterMode = hipFilterModeLinear;
hipLaunchKernelGGL(( d_renderFastBicubic), dim3(gridSize), dim3(blockSize), 0, 0, output, coord, width, height, tx, ty, scale, cx, cy);
break;
default:
tex.filterMode = hipFilterModePoint;
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, coord, width, height, tx, ty, scale, cx, cy);
break;
}
getLastCudaError("kernel failed");
}
#endif
| 317b4cfbdef8949f61c2855f12ae6a9d77e750de.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _BICUBICTEXTURE_CU_
#define _BICUBICTEXTURE_CU_
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <helper_math.h>
// includes, cuda
#include <helper_cuda.h>
#include "transform_kernel.cuh"
#include "transform_common.h"
cudaArray *d_imageArray = 0;
extern "C"
void initTexture(int imageWidth, int imageHeight, uchar *h_data)
{
// allocate array and copy image data
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
checkCudaErrors(cudaMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight));
uint size = imageWidth * imageHeight * sizeof(uchar);
checkCudaErrors(cudaMemcpyToArray(d_imageArray, 0, 0, h_data, size, cudaMemcpyHostToDevice));
free(h_data);
// set texture parameters
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false; // access with integer texture coordinates
getLastCudaError("initTexture");
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(tex, d_imageArray));
// bind same array to 2nd texture reference with point sampling
tex2.addressMode[0] = cudaAddressModeClamp;
tex2.addressMode[1] = cudaAddressModeClamp;
tex2.filterMode = cudaFilterModePoint;
tex2.normalized = false; // access with integer texture coordinates
checkCudaErrors(cudaBindTextureToArray(tex2, d_imageArray));
}
extern "C"
void unbindTexture()
{
checkCudaErrors(cudaUnbindTexture(tex));
checkCudaErrors(cudaUnbindTexture(tex2));
}
extern "C"
void freeTexture()
{
checkCudaErrors(cudaFreeArray(d_imageArray));
}
extern "C"
void initConstant(double *h_srcGeoTransform, double *h_srcToWGS84, double *h_srcDatum,
double *h_dstGeoTransform, double *h_dstToWGS84, double *h_dstDatum)
{
cudaMemcpyToSymbol(c_srcGeoTransform, h_srcGeoTransform, 6 * sizeof(double));
cudaMemcpyToSymbol(c_srcToWGS84, h_srcToWGS84, 7 * sizeof(double));
cudaMemcpyToSymbol(c_srcDatum, h_srcDatum, 4 * sizeof(double));
cudaMemcpyToSymbol(c_dstGeoTransform, h_dstGeoTransform, 6 * sizeof(double));
cudaMemcpyToSymbol(c_dstToWGS84, h_dstToWGS84, 7 * sizeof(double));
cudaMemcpyToSymbol(c_dstDatum, h_dstDatum, 4 * sizeof(double));
}
// transform coordinate without projection using CUDA
extern "C"
void transformGPU(int width, int height, int2 *coord,
dim3 blockSize, dim3 gridSize)
{
transformNoProjGPU<<<gridSize, blockSize>>>(coord, width, height);
getLastCudaError("kernel failed");
}
extern "C"
void transformGPUTest(int width, int height, int2 *coord,
dim3 blockSize, dim3 gridSize)
{
transformTest<<<gridSize, blockSize>>>(coord, width, height);
getLastCudaError("kernel failed");
}
// render image using CUDA
extern "C"
void render(int width, int height, float tx, float ty, float scale, float cx, float cy,
dim3 blockSize, dim3 gridSize, int filter_mode, uchar *output, int2 *coord)
{
// call CUDA kernel, writing results to PBO memory
switch (filter_mode)
{
case MODE_NEAREST:
tex.filterMode = cudaFilterModePoint;
d_render<<<gridSize, blockSize>>>(output, coord, width, height, tx, ty, scale, cx, cy);
break;
case MODE_BILINEAR:
tex.filterMode = cudaFilterModeLinear;
d_render<<<gridSize, blockSize>>>(output, coord, width, height, tx, ty, scale, cx, cy);
break;
case MODE_BICUBIC:
tex.filterMode = cudaFilterModePoint;
d_renderBicubic<<<gridSize, blockSize>>>(output, coord, width, height, tx, ty, scale, cx, cy);
break;
case MODE_FAST_BICUBIC:
tex.filterMode = cudaFilterModeLinear;
d_renderFastBicubic<<<gridSize, blockSize>>>(output, coord, width, height, tx, ty, scale, cx, cy);
break;
default:
tex.filterMode = cudaFilterModePoint;
d_render<<<gridSize, blockSize>>>(output, coord, width, height, tx, ty, scale, cx, cy);
break;
}
getLastCudaError("kernel failed");
}
#endif
|
e496ab25b83bfe8f8782721793768baebc290ef5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "../include/gstack.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
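// Device-side smoke test for gpu_stl::stack: log empty()/size() while pushing 20 values, then empty()/size()/top() while popping; 10086 marks section boundaries in the output buffer.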
__global__ void test(float *output){
gpu_stl::stack<float> stk;
int idx = 0;
output[idx++] = stk.empty();
output[idx++] = stk.size();
output[idx++] = 10086;
for(int i=1;i<=20;++i){
stk.push(i*1.7);
output[idx++] = stk.empty();
output[idx++] = stk.size();
}
output[idx++] = 10086;
while(!stk.empty()){
output[idx++] = stk.empty();
output[idx++] = stk.size();
output[idx++] = stk.top();
stk.pop();
}
}
int main(){
def_dvec(float) dev_out(120, 0);
hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, to_ptr(dev_out));
for(auto k:dev_out) cout<<k<<' ';
cout<<endl;
return 0;
}
| e496ab25b83bfe8f8782721793768baebc290ef5.cu | #include <iostream>
#include "../include/gstack.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
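// Device-side smoke test for gpu_stl::stack: log empty()/size() while pushing 20 values, then empty()/size()/top() while popping; 10086 marks section boundaries in the output buffer.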
__global__ void test(float *output){
gpu_stl::stack<float> stk;
int idx = 0;
output[idx++] = stk.empty();
output[idx++] = stk.size();
output[idx++] = 10086;
for(int i=1;i<=20;++i){
stk.push(i*1.7);
output[idx++] = stk.empty();
output[idx++] = stk.size();
}
output[idx++] = 10086;
while(!stk.empty()){
output[idx++] = stk.empty();
output[idx++] = stk.size();
output[idx++] = stk.top();
stk.pop();
}
}
int main(){
def_dvec(float) dev_out(120, 0);
test<<<1, 1>>>(to_ptr(dev_out));
for(auto k:dev_out) cout<<k<<' ';
cout<<endl;
return 0;
}
|
b58b3dbf21ca5bbfb5c9b546bedeb516f08721ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/warp_solver/SolverIterationData.h"
#include "core/warp_solver/solver_types.h"
#include "math/vector_ops.hpp"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
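// Converts each node's scaled 6-DoF twist update into an SE(3) transform (axis-angle rotation via Rodrigues' formula) and left-multiplies it onto the node's previous warp-field transform.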
__global__ void applyWarpFieldUpdateKernel(
const DeviceArrayView<DualQuaternion> warp_field,
const float* _warp_field_update,
DualQuaternion* updated_warpfield,
const float coef
) {
const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
if (tidx >= warp_field.Size()) return;
float3 twist_rot;
twist_rot.x = coef * _warp_field_update[6 * tidx];
twist_rot.y = coef * _warp_field_update[6 * tidx + 1];
twist_rot.z = coef * _warp_field_update[6 * tidx + 2];
float3 twist_trans;
twist_trans.x = coef * _warp_field_update[6 * tidx + 3];
twist_trans.y = coef * _warp_field_update[6 * tidx + 4];
twist_trans.z = coef * _warp_field_update[6 * tidx + 5];
mat34 SE3;
if (fabsf_sum(twist_rot) < 1e-4f) {
SE3.rot = mat33::identity();
}
else {
const float angle = norm(twist_rot);
const float3 axis = 1.0f / angle * twist_rot;
float c = cosf(angle);
float s = sinf(angle);
float t = 1.0f - c;
SE3.rot.m00() = t*axis.x*axis.x + c;
SE3.rot.m01() = t*axis.x*axis.y - axis.z*s;
SE3.rot.m02() = t*axis.x*axis.z + axis.y*s;
SE3.rot.m10() = t*axis.x*axis.y + axis.z*s;
SE3.rot.m11() = t*axis.y*axis.y + c;
SE3.rot.m12() = t*axis.y*axis.z - axis.x*s;
SE3.rot.m20() = t*axis.x*axis.z - axis.y*s;
SE3.rot.m21() = t*axis.y*axis.z + axis.x*s;
SE3.rot.m22() = t*axis.z*axis.z + c;
}
SE3.trans = twist_trans;
mat34 SE3_prev = warp_field[tidx];
SE3_prev = SE3 * SE3_prev;
updated_warpfield[tidx] = SE3_prev;
}
} // namespace device
} // namespace surfelwarp
void surfelwarp::SolverIterationData::ApplyWarpFieldUpdate(hipStream_t stream, float step) {
//Determine which node list updated to
const auto init_dq = CurrentWarpFieldInput();
DeviceArraySlice<DualQuaternion> updated_dq;
switch (m_updated_se3) {
case IterationInputFrom::WarpFieldInit:
case IterationInputFrom::SE3_Buffer_1:
updated_dq = node_se3_0_.ArraySlice();
break;
case IterationInputFrom::SE3_Buffer_0:
updated_dq = node_se3_1_.ArraySlice();
break;
}
//Invoke the kernel
dim3 blk(64);
dim3 grid(divUp(NumNodes(), blk.x));
hipLaunchKernelGGL(( device::applyWarpFieldUpdateKernel), dim3(grid), dim3(blk), 0, stream,
init_dq,
m_twist_update.Ptr(),
updated_dq.RawPtr(),
step
);
//Update the flag
updateIterationFlags();
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
} | b58b3dbf21ca5bbfb5c9b546bedeb516f08721ae.cu | #include "core/warp_solver/SolverIterationData.h"
#include "core/warp_solver/solver_types.h"
#include "math/vector_ops.hpp"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
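// Converts each node's scaled 6-DoF twist update into an SE(3) transform (axis-angle rotation via Rodrigues' formula) and left-multiplies it onto the node's previous warp-field transform.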
__global__ void applyWarpFieldUpdateKernel(
const DeviceArrayView<DualQuaternion> warp_field,
const float* _warp_field_update,
DualQuaternion* updated_warpfield,
const float coef
) {
const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
if (tidx >= warp_field.Size()) return;
float3 twist_rot;
twist_rot.x = coef * _warp_field_update[6 * tidx];
twist_rot.y = coef * _warp_field_update[6 * tidx + 1];
twist_rot.z = coef * _warp_field_update[6 * tidx + 2];
float3 twist_trans;
twist_trans.x = coef * _warp_field_update[6 * tidx + 3];
twist_trans.y = coef * _warp_field_update[6 * tidx + 4];
twist_trans.z = coef * _warp_field_update[6 * tidx + 5];
mat34 SE3;
if (fabsf_sum(twist_rot) < 1e-4f) {
SE3.rot = mat33::identity();
}
else {
const float angle = norm(twist_rot);
const float3 axis = 1.0f / angle * twist_rot;
float c = cosf(angle);
float s = sinf(angle);
float t = 1.0f - c;
SE3.rot.m00() = t*axis.x*axis.x + c;
SE3.rot.m01() = t*axis.x*axis.y - axis.z*s;
SE3.rot.m02() = t*axis.x*axis.z + axis.y*s;
SE3.rot.m10() = t*axis.x*axis.y + axis.z*s;
SE3.rot.m11() = t*axis.y*axis.y + c;
SE3.rot.m12() = t*axis.y*axis.z - axis.x*s;
SE3.rot.m20() = t*axis.x*axis.z - axis.y*s;
SE3.rot.m21() = t*axis.y*axis.z + axis.x*s;
SE3.rot.m22() = t*axis.z*axis.z + c;
}
SE3.trans = twist_trans;
mat34 SE3_prev = warp_field[tidx];
SE3_prev = SE3 * SE3_prev;
updated_warpfield[tidx] = SE3_prev;
}
} // namespace device
} // namespace surfelwarp
void surfelwarp::SolverIterationData::ApplyWarpFieldUpdate(cudaStream_t stream, float step) {
//Determine which node list updated to
const auto init_dq = CurrentWarpFieldInput();
DeviceArraySlice<DualQuaternion> updated_dq;
switch (m_updated_se3) {
case IterationInputFrom::WarpFieldInit:
case IterationInputFrom::SE3_Buffer_1:
updated_dq = node_se3_0_.ArraySlice();
break;
case IterationInputFrom::SE3_Buffer_0:
updated_dq = node_se3_1_.ArraySlice();
break;
}
//Invoke the kernel
dim3 blk(64);
dim3 grid(divUp(NumNodes(), blk.x));
device::applyWarpFieldUpdateKernel<<<grid, blk, 0, stream>>>(
init_dq,
m_twist_update.Ptr(),
updated_dq.RawPtr(),
step
);
//Update the flag
updateIterationFlags();
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
} |
6b6ecc8a760044c10722b21699e28ebfcfae4978.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include "kernel_hip.cuh"
#include "runtime.cuh"
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool){
int warpIdxx = (threadIdx.x/warpSize);
__shared__ volatile int barID; // the ID for bar.sync
__shared__ volatile int smStartIndx; // the start pointer for free memory region of shared memory
__shared__ volatile int doneCtr[BP_NUM]; // number of warp in a task
__shared__ volatile gWarpStruct warpPoolDev[BP_NUM]; // warpPool
int taskPointer; //local pointer of task table
int taskStartP; //global pointer of task table
__shared__ volatile int barIDArray[syncNum];
__shared__ volatile int sharedTree[SH_TREE_SIZE]; //shared mem data structure
__shared__ volatile int warpCtr;
__shared__ volatile int warpId;
__shared__ volatile int exit;
extern __shared__ volatile int shared_mem[];
int i;
int threadDone;
// Init warp pool
if((threadIdx.x & 0x1f) != 0)
warpPoolDev[(threadIdx.x & 0x1f)].exec = 0;
else
warpPoolDev[(threadIdx.x & 0x1f)].exec = -1;
taskPointer = 0;
exit = 0;
doneCtr[(threadIdx.x & 0x1f)] = 0;
__threadfence_block();
if(threadIdx.x < warpSize){
while(!(*done)){
taskStartP = (taskPointer * BK_NUM + blockIdx.x);
__threadfence_block();
if(gTaskPool[taskStartP].readyId != -1 && doneCtr[taskPointer] == 0){
if(gTaskPool[gTaskPool[taskStartP].readyId].done == 1){
barID = -1;
smStartIndx = -1;
doneCtr[taskPointer] = gTaskPool[gTaskPool[taskStartP].readyId].thread*
gTaskPool[gTaskPool[taskStartP].readyId].block/warpSize;
warpCtr = doneCtr[taskPointer];
warpId = 0;
//parallel scheduling
while(1){
threadDone = 1;
if(threadIdx.x > 0) {
threadDone = 0;
if(warpPoolDev[threadIdx.x].exec == 0){
if(atomicSub((int*)&warpCtr, 1) > 0){
warpPoolDev[threadIdx.x].warpId = atomicAdd((int*)&warpId, 1)*warpSize;
warpPoolDev[threadIdx.x].bufferNum = gTaskPool[taskStartP].readyId; // global pointer of task table
warpPoolDev[threadIdx.x].SMindex = smStartIndx; // shared mem. index
warpPoolDev[threadIdx.x].barId = barID; // index of threadblock
warpPoolDev[threadIdx.x].threadNum = gTaskPool[gTaskPool[taskStartP].readyId].thread; // num. of thread
warpPoolDev[threadIdx.x].taskId = taskPointer; // local pointer of task table
__threadfence_block(); // To make sure the exec. is worked after fence
warpPoolDev[threadIdx.x].exec = 1;
__threadfence_block(); //
} // End atomic
}
}
if(warpCtr <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
}
}// End while(1)
//gTaskPool[taskStartP-1].done = 0; // reset flag whenever task scheduling has been done
gTaskPool[gTaskPool[taskStartP].readyId].done = 0;
gTaskPool[taskStartP].readyId = -1;
} // End if ready flag
}
taskPointer++; // renew the local pointer of task table
if(taskPointer == BP_NUM)
taskPointer = 0;
}// End while done
exit = 1;
__threadfence_block();
}// End if thread < 32
else
{
while(!exit){
if(warpPoolDev[warpIdxx].exec == 1){
// kernel running here
switch(gTaskPool[warpPoolDev[warpIdxx].bufferNum].funcId){
case 0:
convolutionRowsGPU((float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[0],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[1],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[2],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[3],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[4],
warpPoolDev[warpIdxx].warpId);
break;
case 1:
convolutionColumnsGPU((float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[0],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[1],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[2],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[3],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[4],
warpPoolDev[warpIdxx].warpId);
break;
default:
break;
}
if((threadIdx.x & 0x1f) == 0){
if(atomicSub((int*)&doneCtr[warpPoolDev[warpIdxx].taskId], 1) == 1){ // when all warps in a task have been done
__threadfence_system();
gTaskPool[warpPoolDev[warpIdxx].bufferNum].ready = 0; //unset the ready flag
atomicAdd((int*)totalScheTasks,1); //update the global task counter
}
warpPoolDev[warpIdxx].exec = 0;
__threadfence_block();
} // End if exec
} // End if threadIdx.x
} // End while done
} // End else
}
| 6b6ecc8a760044c10722b21699e28ebfcfae4978.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include "kernel.cuh"
#include "runtime.cuh"
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool){
int warpIdxx = (threadIdx.x/warpSize);
__shared__ volatile int barID; // the ID for bar.sync
__shared__ volatile int smStartIndx; // the start pointer for free memory region of shared memory
__shared__ volatile int doneCtr[BP_NUM]; // number of warp in a task
__shared__ volatile gWarpStruct warpPoolDev[BP_NUM]; // warpPool
int taskPointer; //local pointer of task table
int taskStartP; //global pointer of task table
__shared__ volatile int barIDArray[syncNum];
__shared__ volatile int sharedTree[SH_TREE_SIZE]; //shared mem data structure
__shared__ volatile int warpCtr;
__shared__ volatile int warpId;
__shared__ volatile int exit;
extern __shared__ volatile int shared_mem[];
int i;
int threadDone;
// Init warp pool
if((threadIdx.x & 0x1f) != 0)
warpPoolDev[(threadIdx.x & 0x1f)].exec = 0;
else
warpPoolDev[(threadIdx.x & 0x1f)].exec = -1;
taskPointer = 0;
exit = 0;
doneCtr[(threadIdx.x & 0x1f)] = 0;
__threadfence_block();
if(threadIdx.x < warpSize){
while(!(*done)){
taskStartP = (taskPointer * BK_NUM + blockIdx.x);
__threadfence_block();
if(gTaskPool[taskStartP].readyId != -1 && doneCtr[taskPointer] == 0){
if(gTaskPool[gTaskPool[taskStartP].readyId].done == 1){
barID = -1;
smStartIndx = -1;
doneCtr[taskPointer] = gTaskPool[gTaskPool[taskStartP].readyId].thread*
gTaskPool[gTaskPool[taskStartP].readyId].block/warpSize;
warpCtr = doneCtr[taskPointer];
warpId = 0;
//parallel scheduling
while(1){
threadDone = 1;
if(threadIdx.x > 0) {
threadDone = 0;
if(warpPoolDev[threadIdx.x].exec == 0){
if(atomicSub((int*)&warpCtr, 1) > 0){
warpPoolDev[threadIdx.x].warpId = atomicAdd((int*)&warpId, 1)*warpSize;
warpPoolDev[threadIdx.x].bufferNum = gTaskPool[taskStartP].readyId; // global pointer of task table
warpPoolDev[threadIdx.x].SMindex = smStartIndx; // shared mem. index
warpPoolDev[threadIdx.x].barId = barID; // index of threadblock
warpPoolDev[threadIdx.x].threadNum = gTaskPool[gTaskPool[taskStartP].readyId].thread; // num. of thread
warpPoolDev[threadIdx.x].taskId = taskPointer; // local pointer of task table
__threadfence_block(); // To make sure the exec. is worked after fence
warpPoolDev[threadIdx.x].exec = 1;
__threadfence_block(); //
} // End atomic
}
}
if(warpCtr <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
}
}// End while(1)
//gTaskPool[taskStartP-1].done = 0; // reset flag whenever task scheduling has been done
gTaskPool[gTaskPool[taskStartP].readyId].done = 0;
gTaskPool[taskStartP].readyId = -1;
} // End if ready flag
}
taskPointer++; // renew the local pointer of task table
if(taskPointer == BP_NUM)
taskPointer = 0;
}// End while done
exit = 1;
__threadfence_block();
}// End if thread < 32
else
{
while(!exit){
if(warpPoolDev[warpIdxx].exec == 1){
// kernel running here
switch(gTaskPool[warpPoolDev[warpIdxx].bufferNum].funcId){
case 0:
convolutionRowsGPU((float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[0],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[1],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[2],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[3],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[4],
warpPoolDev[warpIdxx].warpId);
break;
case 1:
convolutionColumnsGPU((float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[0],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[1],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[2],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[3],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[4],
warpPoolDev[warpIdxx].warpId);
break;
default:
break;
}
if((threadIdx.x & 0x1f) == 0){
if(atomicSub((int*)&doneCtr[warpPoolDev[warpIdxx].taskId], 1) == 1){ // when all warps in a task have been done
__threadfence_system();
gTaskPool[warpPoolDev[warpIdxx].bufferNum].ready = 0; //unset the ready flag
atomicAdd((int*)totalScheTasks,1); //update the global task counter
}
warpPoolDev[warpIdxx].exec = 0;
__threadfence_block();
} // End if exec
} // End if threadIdx.x
} // End while done
} // End else
}
|
skrun.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sdtbs_cu.h"
#include <pthread.h>
#include <unistd.h>
__device__ tbs_type_t d_tbs_type;
__device__ skrun_t *d_skruns;
__device__ unsigned *d_mtbs_done_cnts;
static skrun_t *g_skruns;
static unsigned *g_mtbs_done_cnts;
static unsigned *info_n_mtbs;
static BOOL *skrun_dones;
static unsigned skrid_done_min;
static unsigned cur_skrid_host;
static BOOL checker_done;
static pthread_t checker;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static hipStream_t strm_submit;
#define SK_PROTO(name) __device__ int name(void *args[])
#define SK_FUNCS(base) SK_PROTO(base);
SK_FUNCS(loopcalc)
SK_FUNCS(mklc)
SK_FUNCS(gma)
SK_FUNCS(lma)
SK_FUNCS(kmeans)
static __device__ int
run_sub_kernel_func(skid_t skid, void *args[])
{
switch (skid) {
case LOOPCALC:
return loopcalc(args);
case MKLC:
return mklc(args);
case GMA:
return gma(args);
case LMA:
return lma(args);
case KMEANS:
return kmeans(args);
default:
return 0;
}
}
__device__ void
run_sub_kernel(skrid_t skrid)
{
skrun_t *skr;
int res;
skr = &d_skruns[skrid - 1];
res = run_sub_kernel_func(skr->skid, skr->args);
if (get_threadIdxX() == 0)
skr->res = res;
}
__global__ void
sub_kernel_func(skrid_t skrid)
{
run_sub_kernel(skrid);
}
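// Host side: build a skrun descriptor and copy it into the next device queue slot on the submit stream; the mutex protects the host-side cursor and bookkeeping arrays.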
static skrid_t
submit_skrun(skid_t skid, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrun_t skrun;
skrun.skid = skid;
skrun.dimGrid = dimGrid;
skrun.dimBlock = dimBlock;
memcpy(skrun.args, args, sizeof(void *) * MAX_ARGS);
skrun.res = 0;
skrun.n_tbs = dimGrid.x * dimGrid.y;
skrun.n_mtbs_per_tb = dimBlock.x * dimBlock.y / N_THREADS_PER_mTB;
pthread_mutex_lock(&mutex);
skrid = cur_skrid_host + 1;
info_n_mtbs[skrid - 1] = skrun.n_tbs * skrun.n_mtbs_per_tb;
hipMemcpyAsync(g_skruns + cur_skrid_host, &skrun, sizeof(skrun_t), hipMemcpyHostToDevice, strm_submit);
hipStreamSynchronize(strm_submit);
cur_skrid_host++;
pthread_mutex_unlock(&mutex);
return skrid;
}
skrid_t
launch_kernel(skid_t skid, hipStream_t strm, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrid = submit_skrun(skid, dimGrid, dimBlock, args);
if (sched->type == TBS_TYPE_HW)
hipLaunchKernelGGL(( sub_kernel_func), dim3(dimGrid), dim3(dimBlock), 0, strm, skrid);
return skrid;
}
static void
wait_skrun(skrid_t skrid)
{
pthread_mutex_lock(&mutex);
while (!checker_done && !skrun_dones[skrid - 1])
pthread_cond_wait(&cond, &mutex);
pthread_mutex_unlock(&mutex);
}
void
wait_kernel(skrid_t skrid, hipStream_t strm, int *pres)
{
skrun_t *skr;
int res;
if (sched->type == TBS_TYPE_HW)
hipStreamSynchronize(strm);
else
wait_skrun(skrid);
skr = g_skruns + (skrid - 1);
hipMemcpyAsync(&res, &skr->res, sizeof(int), hipMemcpyDeviceToHost, strm);
hipStreamSynchronize(strm);
*pres = res;
}
static void
notify_done_skruns(unsigned *mtbs_done_cnts, unsigned n_checks)
{
unsigned min_new = skrid_done_min;
BOOL notify = FALSE;
unsigned i;
pthread_mutex_lock(&mutex);
for (i = 0; i < n_checks; i++) {
if (skrun_dones[i + skrid_done_min])
continue;
if (mtbs_done_cnts[i] == info_n_mtbs[i + skrid_done_min]) {
notify = TRUE;
skrun_dones[i + skrid_done_min] = TRUE;
if (min_new == i + skrid_done_min) {
min_new++;
}
}
}
skrid_done_min = min_new;
if (notify)
pthread_cond_broadcast(&cond);
pthread_mutex_unlock(&mutex);
}
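// Polling thread: copies the per-kernel mTB completion counters back to the host every 100 microseconds and wakes waiters whose kernels have finished.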
static void *
skruns_checkfunc(void *arg)
{
hipStream_t strm;
hipStreamCreate(&strm);
while (!checker_done) {
unsigned n_checks = cur_skrid_host - skrid_done_min;
if (n_checks > 0) {
unsigned *mtbs_done_cnts = (unsigned *)malloc(sizeof(unsigned) * n_checks);
hipMemcpyAsync(mtbs_done_cnts, g_mtbs_done_cnts + skrid_done_min, sizeof(unsigned) * n_checks, hipMemcpyDeviceToHost, strm);
hipStreamSynchronize(strm);
notify_done_skruns(mtbs_done_cnts, n_checks);
free(mtbs_done_cnts);
}
usleep(100);
}
hipStreamDestroy(strm);
return NULL;
}
__global__ void
kernel_init_skrun(tbs_type_t type, skrun_t *skruns, unsigned *mtbs_done_cnts)
{
int i;
d_tbs_type = type;
d_skruns = skruns;
d_mtbs_done_cnts = mtbs_done_cnts;
for (i = 0; i < MAX_QUEUED_KERNELS; i++) {
skruns[i].skid = 0;
mtbs_done_cnts[i] = 0;
}
}
void
init_skrun(void)
{
hipError_t err;
hipStreamCreate(&strm_submit);
hipMalloc(&g_skruns, sizeof(skrun_t) * MAX_QUEUED_KERNELS);
hipMalloc(&g_mtbs_done_cnts, sizeof(unsigned) * MAX_QUEUED_KERNELS);
info_n_mtbs = (unsigned *)calloc(MAX_QUEUED_KERNELS, sizeof(unsigned));
skrun_dones = (BOOL *)calloc(MAX_QUEUED_KERNELS, sizeof(BOOL));
pthread_create(&checker, NULL, skruns_checkfunc, NULL);
dim3 dimGrid(1,1), dimBlock(1,1);
hipLaunchKernelGGL(( kernel_init_skrun), dim3(dimGrid), dim3(dimBlock), 0, 0, sched->type, g_skruns, g_mtbs_done_cnts);
err = hipGetLastError();
if (err != hipSuccess)
error("failed to initialize skrun: %s\n", hipGetErrorString(err));
else
hipDeviceSynchronize();
}
void
fini_skrun(void)
{
void *retval;
checker_done = TRUE;
pthread_join(checker, &retval);
}
| skrun.cu | #include "sdtbs_cu.h"
#include <pthread.h>
#include <unistd.h>
__device__ tbs_type_t d_tbs_type;
__device__ skrun_t *d_skruns;
__device__ unsigned *d_mtbs_done_cnts;
static skrun_t *g_skruns;
static unsigned *g_mtbs_done_cnts;
static unsigned *info_n_mtbs;
static BOOL *skrun_dones;
static unsigned skrid_done_min;
static unsigned cur_skrid_host;
static BOOL checker_done;
static pthread_t checker;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static cudaStream_t strm_submit;
#define SK_PROTO(name) __device__ int name(void *args[])
#define SK_FUNCS(base) SK_PROTO(base);
SK_FUNCS(loopcalc)
SK_FUNCS(mklc)
SK_FUNCS(gma)
SK_FUNCS(lma)
SK_FUNCS(kmeans)
static __device__ int
run_sub_kernel_func(skid_t skid, void *args[])
{
switch (skid) {
case LOOPCALC:
return loopcalc(args);
case MKLC:
return mklc(args);
case GMA:
return gma(args);
case LMA:
return lma(args);
case KMEANS:
return kmeans(args);
default:
return 0;
}
}
__device__ void
run_sub_kernel(skrid_t skrid)
{
skrun_t *skr;
int res;
skr = &d_skruns[skrid - 1];
res = run_sub_kernel_func(skr->skid, skr->args);
if (get_threadIdxX() == 0)
skr->res = res;
}
__global__ void
sub_kernel_func(skrid_t skrid)
{
run_sub_kernel(skrid);
}
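// Host side: build a skrun descriptor and copy it into the next device queue slot on the submit stream; the mutex protects the host-side cursor and bookkeeping arrays.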
static skrid_t
submit_skrun(skid_t skid, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrun_t skrun;
skrun.skid = skid;
skrun.dimGrid = dimGrid;
skrun.dimBlock = dimBlock;
memcpy(skrun.args, args, sizeof(void *) * MAX_ARGS);
skrun.res = 0;
skrun.n_tbs = dimGrid.x * dimGrid.y;
skrun.n_mtbs_per_tb = dimBlock.x * dimBlock.y / N_THREADS_PER_mTB;
pthread_mutex_lock(&mutex);
skrid = cur_skrid_host + 1;
info_n_mtbs[skrid - 1] = skrun.n_tbs * skrun.n_mtbs_per_tb;
cudaMemcpyAsync(g_skruns + cur_skrid_host, &skrun, sizeof(skrun_t), cudaMemcpyHostToDevice, strm_submit);
cudaStreamSynchronize(strm_submit);
cur_skrid_host++;
pthread_mutex_unlock(&mutex);
return skrid;
}
skrid_t
launch_kernel(skid_t skid, cudaStream_t strm, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrid = submit_skrun(skid, dimGrid, dimBlock, args);
if (sched->type == TBS_TYPE_HW)
sub_kernel_func<<<dimGrid, dimBlock, 0, strm>>>(skrid);
return skrid;
}
static void
wait_skrun(skrid_t skrid)
{
pthread_mutex_lock(&mutex);
while (!checker_done && !skrun_dones[skrid - 1])
pthread_cond_wait(&cond, &mutex);
pthread_mutex_unlock(&mutex);
}
void
wait_kernel(skrid_t skrid, cudaStream_t strm, int *pres)
{
skrun_t *skr;
int res;
if (sched->type == TBS_TYPE_HW)
cudaStreamSynchronize(strm);
else
wait_skrun(skrid);
skr = g_skruns + (skrid - 1);
cudaMemcpyAsync(&res, &skr->res, sizeof(int), cudaMemcpyDeviceToHost, strm);
cudaStreamSynchronize(strm);
*pres = res;
}
static void
notify_done_skruns(unsigned *mtbs_done_cnts, unsigned n_checks)
{
unsigned min_new = skrid_done_min;
BOOL notify = FALSE;
unsigned i;
pthread_mutex_lock(&mutex);
for (i = 0; i < n_checks; i++) {
if (skrun_dones[i + skrid_done_min])
continue;
if (mtbs_done_cnts[i] == info_n_mtbs[i + skrid_done_min]) {
notify = TRUE;
skrun_dones[i + skrid_done_min] = TRUE;
if (min_new == i + skrid_done_min) {
min_new++;
}
}
}
skrid_done_min = min_new;
if (notify)
pthread_cond_broadcast(&cond);
pthread_mutex_unlock(&mutex);
}
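// Polling thread: copies the per-kernel mTB completion counters back to the host every 100 microseconds and wakes waiters whose kernels have finished.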
static void *
skruns_checkfunc(void *arg)
{
cudaStream_t strm;
cudaStreamCreate(&strm);
while (!checker_done) {
unsigned n_checks = cur_skrid_host - skrid_done_min;
if (n_checks > 0) {
unsigned *mtbs_done_cnts = (unsigned *)malloc(sizeof(unsigned) * n_checks);
cudaMemcpyAsync(mtbs_done_cnts, g_mtbs_done_cnts + skrid_done_min, sizeof(unsigned) * n_checks, cudaMemcpyDeviceToHost, strm);
cudaStreamSynchronize(strm);
notify_done_skruns(mtbs_done_cnts, n_checks);
free(mtbs_done_cnts);
}
usleep(100);
}
cudaStreamDestroy(strm);
return NULL;
}
__global__ void
kernel_init_skrun(tbs_type_t type, skrun_t *skruns, unsigned *mtbs_done_cnts)
{
int i;
d_tbs_type = type;
d_skruns = skruns;
d_mtbs_done_cnts = mtbs_done_cnts;
for (i = 0; i < MAX_QUEUED_KERNELS; i++) {
skruns[i].skid = 0;
mtbs_done_cnts[i] = 0;
}
}
void
init_skrun(void)
{
cudaError_t err;
cudaStreamCreate(&strm_submit);
cudaMalloc(&g_skruns, sizeof(skrun_t) * MAX_QUEUED_KERNELS);
cudaMalloc(&g_mtbs_done_cnts, sizeof(unsigned) * MAX_QUEUED_KERNELS);
info_n_mtbs = (unsigned *)calloc(MAX_QUEUED_KERNELS, sizeof(unsigned));
skrun_dones = (BOOL *)calloc(MAX_QUEUED_KERNELS, sizeof(BOOL));
pthread_create(&checker, NULL, skruns_checkfunc, NULL);
dim3 dimGrid(1,1), dimBlock(1,1);
kernel_init_skrun<<<dimGrid, dimBlock>>>(sched->type, g_skruns, g_mtbs_done_cnts);
err = cudaGetLastError();
if (err != cudaSuccess)
error("failed to initialize skrun: %s\n", cudaGetErrorString(err));
else
cudaDeviceSynchronize();
}
void
fini_skrun(void)
{
void *retval;
checker_done = TRUE;
pthread_join(checker, &retval);
}
|
40ce8fdaa2bfa12da6f8c2944d3f64e7997341eb.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include <hip/hip_runtime.h>
//#include <cutil_inline.h>
using namespace std;
#define SUBMATRIX_SIZE 10000
#define NUM_BIN 500
#define HIST_MIN 0.0
#define HIST_MAX 3.5
////////////////////////////////////////////////////////////////////////
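// Each thread owns one point of the x-submatrix and accumulates its angular separation (atan2 form of the great-circle distance) to every y-submatrix point with a smaller index into its private (NUM_BIN+2)-wide histogram slice.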
__global__ void distance(float *a, float *d, int xind, int yind, int *dev_hist)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int thread_idx = idx;
idx += xind;
float alpha = a[idx], delta = d[idx];
float cos_d1 = cos(delta), sin_d1 = sin(delta), dist;
int ymax = yind + SUBMATRIX_SIZE;
int bin_index;
int offset = 0;
float a_diff, sin_a_diff, cos_a_diff;
float cos_d2, sin_d2, numer, denom, mult1, mult2;
for(int i=yind; i<ymax; i++)
{
if(idx > i)
{
a_diff = a[i] - alpha;
sin_a_diff = sin(a_diff);
cos_a_diff = cos(a_diff);
sin_d2 = sin(d[i]);
cos_d2 = cos(d[i]);
mult1 = cos_d2 * cos_d2 * sin_a_diff * sin_a_diff;
mult2 = cos_d1 * sin_d2 - sin_d1 * cos_d2 * cos_a_diff;
mult2 = mult2 * mult2;
numer = sqrt(mult1 + mult2);
denom = sin_d1 *sin_d2 + cos_d1 * cos_d2 * cos_a_diff;
//dist = atan(num);
dist = atan2(numer,denom);
if(dist < HIST_MIN)
bin_index = 0;
else if(dist >= HIST_MAX)
bin_index = NUM_BIN + 1;
else
bin_index = int(((dist - HIST_MIN) * NUM_BIN / HIST_MAX) +1);
offset = ((NUM_BIN+2)*thread_idx);
bin_index += offset;
dev_hist[bin_index]++;
}
}
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
float *d_alpha, *d_delta;
float *h_alpha, *h_delta;
int NUM_PARTICLES;
if (argc < 3)
{
printf("\nMust pass in cluster_data file on command line!\n");
printf("\nUsage: ", argv[0] );
printf(" <cluster_data file> <distances file> \n\n");
exit(1);
}
FILE *infile, *outfile ;
infile = fopen(argv[1],"r");
outfile = fopen(argv[2], "w");
//////////////////////////////////////////////////////////////////////
// Read in the cluster_data file
////////////////////////////////////////////////////////////////////////////
char axis_titles[256];
char dummy[256];
fscanf(infile, "%s %s %s", &axis_titles, &dummy, &axis_titles);
fscanf(infile, "%d", &NUM_PARTICLES);
int size = NUM_PARTICLES * sizeof(float);
printf("# particles: %d\n",NUM_PARTICLES);
h_alpha = (float*)malloc(size);
h_delta = (float*)malloc(size);
for(int i=0; i<NUM_PARTICLES; i++)
{
fscanf(infile, "%f %s %f %s ", &h_alpha[i], &dummy, &h_delta[i], &dummy);
//fscanf(infile, "%f%s %f ", &h_alpha[i], &dummy, &h_delta[i]);
// printf("%e %s %e\n", h_alpha[i], dummy, h_delta[i]);
}
////////////////////////////////////////////////////////////////////////////
//allocation of histogram
///////////////////////////////////////////////////////////////////////////
int *hist, *dev_hist;
int size_hist = SUBMATRIX_SIZE * (NUM_BIN+2);
int size_hist_bytes = size_hist*sizeof(int);
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
printf("size_hist: %d\n",size_hist_bytes);
hipMalloc((void **) &dev_hist, (size_hist_bytes));
hipMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
hist_array = (unsigned long*)malloc((NUM_BIN+2) * sizeof(unsigned long));
memset(hist_array, 0, (NUM_BIN+2)*sizeof(unsigned long));
////////////////////////////////////////////////////////////////////////////
// Define the grid and block size
////////////////////////////////////////////////////////////////////////////
dim3 grid, block;
grid.x =100;
block.x = SUBMATRIX_SIZE/grid.x; //NUM_PARTICLES/block.x;
////////////////////////////////////////////////////////////////////////////
hipMalloc((void **) &d_alpha, size );
hipMalloc((void **) &d_delta, size );
// Check to see if we allocated enough memory.
if (0==d_alpha || 0==d_delta|| 0==dev_hist)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_alpha,0,size);
hipMemset(d_delta,0,size);
hipMemcpy(d_alpha, h_alpha, size, hipMemcpyHostToDevice );
hipMemcpy(d_delta, h_delta, size, hipMemcpyHostToDevice );
int x, y;
int num_submatrices = NUM_PARTICLES / SUBMATRIX_SIZE;
int bin_index = 0;
for(int k = 0; k < num_submatrices; k++)
{
y = k*SUBMATRIX_SIZE;
// printf("%d %d\n",k,y);
for(int j = 0; j < num_submatrices; j++)
{
x = j *SUBMATRIX_SIZE;
//printf("----\n");
//printf("%d %d\t\t%d %d\n",k,y,j,x);
//printf("----\n");
hipMemset(dev_hist,0,size_hist_bytes);
hipLaunchKernelGGL(( distance), dim3(grid),dim3(block), 0, 0, d_alpha, d_delta, x, y, dev_hist);
hipMemcpy(hist, dev_hist, size_hist_bytes, hipMemcpyDeviceToHost);
for(int m=0; m<size_hist; m++)
{
bin_index = m%(NUM_BIN+2);
//if(bin_index == 0)
//printf("\n");
//printf("%3i:%3i ", m, hist[m]);
//printf("%3i ", hist[m]);
hist_array[bin_index] += hist[m];
}
//printf("\n");
}
}
unsigned long total = 0;
float bin_width = (HIST_MAX - HIST_MIN) / NUM_BIN;
float bins_mid = 0;
fprintf(outfile, "%s %s\n", "Angular Distance(radians)","Number of Entries");
for(int k=0; k<NUM_BIN+2; k++)
{
bins_mid = bin_width*(k - 0.5);
fprintf(outfile, "%.3e %s %lu \n", bins_mid, ",", hist_array[k]);
total += hist_array[k];
}
printf("total: %lu \n", total);
fclose(infile);
fclose(outfile);
free(h_alpha);
free(h_delta);
free(hist);
hipFree(d_alpha);
hipFree(d_delta);
hipFree(dev_hist);
return 0;
}
//////////////////////////////////////////////////////////////////////
| 40ce8fdaa2bfa12da6f8c2944d3f64e7997341eb.cu | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include <cuda_runtime.h>
//#include <cutil_inline.h>
using namespace std;
#define SUBMATRIX_SIZE 10000
#define NUM_BIN 500
#define HIST_MIN 0.0
#define HIST_MAX 3.5
////////////////////////////////////////////////////////////////////////
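// Each thread owns one point of the x-submatrix and accumulates its angular separation (atan2 form of the great-circle distance) to every y-submatrix point with a smaller index into its private (NUM_BIN+2)-wide histogram slice.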
__global__ void distance(float *a, float *d, int xind, int yind, int *dev_hist)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int thread_idx = idx;
idx += xind;
float alpha = a[idx], delta = d[idx];
float cos_d1 = cos(delta), sin_d1 = sin(delta), dist;
int ymax = yind + SUBMATRIX_SIZE;
int bin_index;
int offset = 0;
float a_diff, sin_a_diff, cos_a_diff;
float cos_d2, sin_d2, numer, denom, mult1, mult2;
for(int i=yind; i<ymax; i++)
{
if(idx > i)
{
a_diff = a[i] - alpha;
sin_a_diff = sin(a_diff);
cos_a_diff = cos(a_diff);
sin_d2 = sin(d[i]);
cos_d2 = cos(d[i]);
mult1 = cos_d2 * cos_d2 * sin_a_diff * sin_a_diff;
mult2 = cos_d1 * sin_d2 - sin_d1 * cos_d2 * cos_a_diff;
mult2 = mult2 * mult2;
numer = sqrt(mult1 + mult2);
denom = sin_d1 *sin_d2 + cos_d1 * cos_d2 * cos_a_diff;
//dist = atan(num);
dist = atan2(numer,denom);
if(dist < HIST_MIN)
bin_index = 0;
else if(dist >= HIST_MAX)
bin_index = NUM_BIN + 1;
else
bin_index = int(((dist - HIST_MIN) * NUM_BIN / HIST_MAX) +1);
offset = ((NUM_BIN+2)*thread_idx);
bin_index += offset;
dev_hist[bin_index]++;
}
}
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
float *d_alpha, *d_delta;
float *h_alpha, *h_delta;
int NUM_PARTICLES;
if (argc < 3)
{
printf("\nMust pass in cluster_data file on command line!\n");
printf("\nUsage: ", argv[0] );
printf(" <cluster_data file> <distances file> \n\n");
exit(1);
}
FILE *infile, *outfile ;
infile = fopen(argv[1],"r");
outfile = fopen(argv[2], "w");
//////////////////////////////////////////////////////////////////////
// Read in the cluster_data file
////////////////////////////////////////////////////////////////////////////
char axis_titles[256];
char dummy[256];
fscanf(infile, "%s %s %s", &axis_titles, &dummy, &axis_titles);
fscanf(infile, "%d", &NUM_PARTICLES);
int size = NUM_PARTICLES * sizeof(float);
printf("# particles: %d\n",NUM_PARTICLES);
h_alpha = (float*)malloc(size);
h_delta = (float*)malloc(size);
for(int i=0; i<NUM_PARTICLES; i++)
{
fscanf(infile, "%f %s %f %s ", &h_alpha[i], &dummy, &h_delta[i], &dummy);
//fscanf(infile, "%f%s %f ", &h_alpha[i], &dummy, &h_delta[i]);
// printf("%e %s %e\n", h_alpha[i], dummy, h_delta[i]);
}
////////////////////////////////////////////////////////////////////////////
//allocation of histogram
///////////////////////////////////////////////////////////////////////////
int *hist, *dev_hist;
int size_hist = SUBMATRIX_SIZE * (NUM_BIN+2);
int size_hist_bytes = size_hist*sizeof(int);
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
printf("size_hist: %d\n",size_hist_bytes);
cudaMalloc((void **) &dev_hist, (size_hist_bytes));
cudaMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
hist_array = (unsigned long*)malloc((NUM_BIN+2) * sizeof(unsigned long));
memset(hist_array, 0, (NUM_BIN+2)*sizeof(unsigned long));
////////////////////////////////////////////////////////////////////////////
// Define the grid and block size
////////////////////////////////////////////////////////////////////////////
dim3 grid, block;
grid.x =100;
block.x = SUBMATRIX_SIZE/grid.x; //NUM_PARTICLES/block.x;
////////////////////////////////////////////////////////////////////////////
cudaMalloc((void **) &d_alpha, size );
cudaMalloc((void **) &d_delta, size );
// Check to see if we allocated enough memory.
if (0==d_alpha || 0==d_delta|| 0==dev_hist)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_alpha,0,size);
cudaMemset(d_delta,0,size);
cudaMemcpy(d_alpha, h_alpha, size, cudaMemcpyHostToDevice );
cudaMemcpy(d_delta, h_delta, size, cudaMemcpyHostToDevice );
int x, y;
int num_submatrices = NUM_PARTICLES / SUBMATRIX_SIZE;
int bin_index = 0;
for(int k = 0; k < num_submatrices; k++)
{
y = k*SUBMATRIX_SIZE;
// printf("%d %d\n",k,y);
for(int j = 0; j < num_submatrices; j++)
{
x = j *SUBMATRIX_SIZE;
//printf("----\n");
//printf("%d %d\t\t%d %d\n",k,y,j,x);
//printf("----\n");
cudaMemset(dev_hist,0,size_hist_bytes);
distance<<<grid,block>>>(d_alpha, d_delta, x, y, dev_hist);
cudaMemcpy(hist, dev_hist, size_hist_bytes, cudaMemcpyDeviceToHost);
for(int m=0; m<size_hist; m++)
{
bin_index = m%(NUM_BIN+2);
//if(bin_index == 0)
//printf("\n");
//printf("%3i:%3i ", m, hist[m]);
//printf("%3i ", hist[m]);
hist_array[bin_index] += hist[m];
}
//printf("\n");
}
}
unsigned long total = 0;
float bin_width = (HIST_MAX - HIST_MIN) / NUM_BIN;
float bins_mid = 0;
fprintf(outfile, "%s %s\n", "Angular Distance(radians)","Number of Entries");
for(int k=0; k<NUM_BIN+2; k++)
{
bins_mid = bin_width*(k - 0.5);
fprintf(outfile, "%.3e %s %lu \n", bins_mid, ",", hist_array[k]);
total += hist_array[k];
}
printf("total: %lu \n", total);
fclose(infile);
fclose(outfile);
free(h_alpha);
free(h_delta);
free(hist);
cudaFree(d_alpha);
cudaFree(d_delta);
cudaFree(dev_hist);
return 0;
}
//////////////////////////////////////////////////////////////////////
|
f6e3800265b41ada6efd6463b0fe11cb581036b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Diffusion.cuh"
#include "solver/LinearSystemDevice.h"
#include <memory>
using namespace fast;
__device__ __constant__ DiffusionSysParams const_diffusion_params;
//DiffusionSysParams const_diffusion_params_cpu;
bool fast::DiffusionCommitSysParams(const DiffusionSysParams & sysparams) {
hipError_t res = hipMemcpyToSymbol(
const_diffusion_params,
&sysparams,
sizeof(DiffusionSysParams),
0,
hipMemcpyHostToDevice
);
//const_diffusion_params_cpu = sysparams;
return res == hipSuccess;
}
////////////////////////////////
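// Builds one row of the 7-point stencil system per voxel: face diffusivities are averaged from the two adjacent cells, then getSystemTopKernel fills the stencil, right-hand side and initial guess.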
template <typename T>
__global__ void ___linearSysKernel(
CUDA_Volume domain,
LinearSys_StencilDevicePtr<T> A,
T * x,
T * b
) {
VOLUME_VOX_GUARD(domain.res);
size_t i = _linearIndex(domain.res, vox);
T bval = 0.0;
T xval = 0.0;
Stencil_7<T> stencil;
//(c > 0) ? T(value_one) : T(value_zero)
Stencil_7<T> diffusivity;
diffusivity.v[DIR_NONE] = read<T>(domain.surf, vox);
diffusivity.v[X_NEG] = (read<T>(domain.surf, clampedVox(domain.res, vox, X_NEG)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Y_NEG] = (read<T>(domain.surf, clampedVox(domain.res, vox, Y_NEG)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Z_NEG] = (read<T>(domain.surf, clampedVox(domain.res, vox, Z_NEG)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[X_POS] = (read<T>(domain.surf, clampedVox(domain.res, vox, X_POS)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Y_POS] = (read<T>(domain.surf, clampedVox(domain.res, vox, Y_POS)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Z_POS] = (read<T>(domain.surf, clampedVox(domain.res, vox, Z_POS)) + diffusivity.v[DIR_NONE]) * T(0.5);
getSystemTopKernel<T>(
const_diffusion_params,
diffusivity,
ivec3(vox.x, vox.y, vox.z),
ivec3(domain.res.x, domain.res.y, domain.res.z),
&stencil,
&bval,
&xval
);
#pragma unroll
for (int k = 0; k <= DIR_NONE; k++) {
A.dir[k][i] = stencil.v[k];
}
b[i] = bval;
x[i] = xval;
}
template <typename T>
bool __DiffusionGenerateSystem_Impl(const CUDA_Volume & domain, LinearSystemDevice<T> * lsptr) {
//auto lsptr = std::make_unique<LinearSystemDevice<T>>();
auto & ls = *lsptr;
ls.res = ivec3(domain.res.x, domain.res.y, domain.res.z);
ls.NNZ = domain.res.x*domain.res.y*domain.res.z;
for (auto i = 0; i < 7; i++) {
hipDeviceSynchronize();
ls.A.dir[i].resize(ls.NNZ);
hipMemset(THRUST_PTR(ls.A.dir[i]), 0, sizeof(T) * ls.NNZ);
}
hipDeviceSynchronize();
ls.x.resize(ls.NNZ);
hipDeviceSynchronize();
ls.b.resize(ls.NNZ);
{
BLOCKS3D(8, ls.res);
LAUNCH(___linearSysKernel<T>, numBlocks, block,
domain,
ls.A.getPtr(),
THRUST_PTR(ls.x),
THRUST_PTR(ls.b)
);
}
return true;
}
bool fast::DiffusionGenerateSystem(const CUDA_Volume & domain, LinearSystem * linearSystemOut)
{
if (linearSystemOut == nullptr)
return false;
if (domain.type == TYPE_DOUBLE) {
assert(dynamic_cast<LinearSystemDevice<double>*>(linearSystemOut));
return __DiffusionGenerateSystem_Impl<double>(
domain,
static_cast<LinearSystemDevice<double>*>(linearSystemOut)
);
}
if (domain.type == TYPE_FLOAT) {
assert(dynamic_cast<LinearSystemDevice<float>*>(linearSystemOut));
return __DiffusionGenerateSystem_Impl<float>(
domain,
static_cast<LinearSystemDevice<float>*>(linearSystemOut)
);
}
return false;
} | f6e3800265b41ada6efd6463b0fe11cb581036b8.cu | #include "Diffusion.cuh"
#include "solver/LinearSystemDevice.h"
#include <memory>
using namespace fast;
__device__ __constant__ DiffusionSysParams const_diffusion_params;
//DiffusionSysParams const_diffusion_params_cpu;
bool fast::DiffusionCommitSysParams(const DiffusionSysParams & sysparams) {
cudaError_t res = cudaMemcpyToSymbol(
const_diffusion_params,
&sysparams,
sizeof(DiffusionSysParams),
0,
cudaMemcpyHostToDevice
);
//const_diffusion_params_cpu = sysparams;
return res == cudaSuccess;
}
////////////////////////////////
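// Builds one row of the 7-point stencil system per voxel: face diffusivities are averaged from the two adjacent cells, then getSystemTopKernel fills the stencil, right-hand side and initial guess.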
template <typename T>
__global__ void ___linearSysKernel(
CUDA_Volume domain,
LinearSys_StencilDevicePtr<T> A,
T * x,
T * b
) {
VOLUME_VOX_GUARD(domain.res);
size_t i = _linearIndex(domain.res, vox);
T bval = 0.0;
T xval = 0.0;
Stencil_7<T> stencil;
//(c > 0) ? T(value_one) : T(value_zero)
Stencil_7<T> diffusivity;
diffusivity.v[DIR_NONE] = read<T>(domain.surf, vox);
diffusivity.v[X_NEG] = (read<T>(domain.surf, clampedVox(domain.res, vox, X_NEG)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Y_NEG] = (read<T>(domain.surf, clampedVox(domain.res, vox, Y_NEG)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Z_NEG] = (read<T>(domain.surf, clampedVox(domain.res, vox, Z_NEG)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[X_POS] = (read<T>(domain.surf, clampedVox(domain.res, vox, X_POS)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Y_POS] = (read<T>(domain.surf, clampedVox(domain.res, vox, Y_POS)) + diffusivity.v[DIR_NONE]) * T(0.5);
diffusivity.v[Z_POS] = (read<T>(domain.surf, clampedVox(domain.res, vox, Z_POS)) + diffusivity.v[DIR_NONE]) * T(0.5);
getSystemTopKernel<T>(
const_diffusion_params,
diffusivity,
ivec3(vox.x, vox.y, vox.z),
ivec3(domain.res.x, domain.res.y, domain.res.z),
&stencil,
&bval,
&xval
);
#pragma unroll
for (int k = 0; k <= DIR_NONE; k++) {
A.dir[k][i] = stencil.v[k];
}
b[i] = bval;
x[i] = xval;
}
template <typename T>
bool __DiffusionGenerateSystem_Impl(const CUDA_Volume & domain, LinearSystemDevice<T> * lsptr) {
//auto lsptr = std::make_unique<LinearSystemDevice<T>>();
auto & ls = *lsptr;
ls.res = ivec3(domain.res.x, domain.res.y, domain.res.z);
ls.NNZ = domain.res.x*domain.res.y*domain.res.z;
for (auto i = 0; i < 7; i++) {
cudaDeviceSynchronize();
ls.A.dir[i].resize(ls.NNZ);
cudaMemset(THRUST_PTR(ls.A.dir[i]), 0, sizeof(T) * ls.NNZ);
}
cudaDeviceSynchronize();
ls.x.resize(ls.NNZ);
cudaDeviceSynchronize();
ls.b.resize(ls.NNZ);
{
BLOCKS3D(8, ls.res);
LAUNCH(___linearSysKernel<T>, numBlocks, block,
domain,
ls.A.getPtr(),
THRUST_PTR(ls.x),
THRUST_PTR(ls.b)
);
}
return true;
}
bool fast::DiffusionGenerateSystem(const CUDA_Volume & domain, LinearSystem * linearSystemOut)
{
if (linearSystemOut == nullptr)
return false;
if (domain.type == TYPE_DOUBLE) {
assert(dynamic_cast<LinearSystemDevice<double>*>(linearSystemOut));
return __DiffusionGenerateSystem_Impl<double>(
domain,
static_cast<LinearSystemDevice<double>*>(linearSystemOut)
);
}
if (domain.type == TYPE_FLOAT) {
assert(dynamic_cast<LinearSystemDevice<float>*>(linearSystemOut));
return __DiffusionGenerateSystem_Impl<float>(
domain,
static_cast<LinearSystemDevice<float>*>(linearSystemOut)
);
}
return false;
} |
280d31c46498a4b66516a796dbe16fa06b730bd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<fstream>
#include<algorithm>
#include<string.h>
#include<string>
#include<ctime>
#include<cstdlib>
#include<hip/hip_runtime_api.h>
#include<device_launch_parameters.h>
#define block_size 4
#define thread_size 32
using namespace std;
//const int total_threads = block_size*block_size*thread_size*thread_size;
typedef struct {
int row, col;
float value;
}Triple;
Triple M[1000000];
Triple N[1000000];
//Triple C[100000000];
float C[9000][9000];
int rowM[1000000];
int colM[1000000];
float valueM[1000000];
int rowN[1000000];
int colN[1000000];
float valueN[1000000];
bool cmp(Triple x, Triple y) {
if(x.row > y.row || ((x.row == y.row) && (x.col > y.col)))
return false;
return true;
}
__global__ void cudaMat(float valueM[], float valueN[],
int rowM[], int rowN[],
int colM[], int colN[],
int indexM[], int indexN[],
int lenM[], int lenN[],
float C[][9000], int size2[]
)
{
int size = size2[0];
size = 8191;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int my_rank = i*thread_size*block_size + j;
i = my_rank/2;
if(i >= size)
return;
if(indexM[i]==-1)
return;
int jstart = 0;
int jend = size/2;
if(my_rank%2) {
jstart = size/2;
jend = size;
}
for(int j=jstart; j<jend; j++) {
if(indexN[j]==-1)
continue;
int p=0;
int q=0;
float sum = 0.0;
int flag = 0;
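// Two-pointer sweep over the column-sorted entries of row i of M and row j of N
// (N was stored transposed when it was read in), accumulating the sparse dot
// product that becomes C[i][j].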
while(p<lenM[i] && q<lenN[j]) {
if(colM[p+indexM[i]] == colN[q+indexN[j]]) {
flag = 1;
sum += valueM[p+indexM[i]] *valueN[q+indexN[j]];
++p;++q;
}
else if(colM[p+indexM[i]] < colN[q+indexN[j]]) {
++p;
}
else {
++q;
}
}
if(!flag)
continue;
C[i][j] = sum;
}
}
clock_t start, tend;
int main()
{
int Msize1, Msize2, Nsize1, Nsize2;
int numM, numN;
// read data
ifstream fin;
fin.open("matrix.mat", ios::in);
//fin.open("test.dat", ios::in);
string s;
fin >> s >> s >> s >> s >> s;
fin >> Msize1 >> Msize2 >> numM;
int cnt = 0;
int row, col;
float value;
while (!fin.eof()) {
fin >> row >> col >> value;
M[cnt].row = row;
M[cnt].col = col;
M[cnt].value = value;
++cnt;
}
fin.close();
fin.open("matrix12.mat", ios::in);
//fin.open("test2.dat", ios::in);
fin >> s >> s >> s >> s >> s;
fin >> Nsize1 >> Nsize2 >> numN;
cnt = 0;
while (!fin.eof()) {
fin >> row >> col >> value;
N[cnt].row = col;
N[cnt].col = row;
N[cnt].value = value;
++cnt;
}
fin.close();
start = clock();
sort(M, M+numM, cmp);
sort(N, N+numN, cmp);
int indexM[Msize1+1];
int indexN[Nsize2+1];
int lenM[Msize1+1];
int lenN[Nsize2+1];
memset(indexM, -1, (Msize1+1)*sizeof(int));
memset(indexN, -1, (Nsize2+1)*sizeof(int));
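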
cnt = 0;
int k = -1;
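// Build a CSR-style row index over the sorted triples: indexM[r] is the offset of the
// first entry of row r in M[] (-1 if row r has no nonzeros) and lenM[r] is the number
// of nonzeros in that row; the same is done for the transposed N below.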
for(int i=0; i<numM; i++) {
if(indexM[M[i].row] == -1) {
indexM[M[i].row] = i;
k = M[i].row;
lenM[k] = 1;
while(M[i+1].row == k) {
i++;
lenM[k]++;
}
}
}
for(int i=0; i<numM; i++) {
rowM[i] = M[i].row;
colM[i] = M[i].col;
valueM[i] = M[i].value;
}
for(int i=0; i<numN; i++) {
if(indexN[N[i].row] == -1) {
indexN[N[i].row] = i;
k = N[i].row;
lenN[k] = 1;
while(N[i+1].row == k) {
i++;
lenN[k]++;
}
}
}
for(int i=0; i<numN; i++) {
rowN[i] = N[i].row;
colN[i] = N[i].col;
valueN[i] = N[i].value;
}
cnt = 0;
// split row i into different process
int *device_rowM, *device_colM, *device_rowN, *device_colN, *device_indexM, *device_indexN, *device_lenM, *device_lenN;
float *device_valueM, *device_valueN;
float (*device_C)[9000];
int *device_size;
hipMalloc((void **)&device_C, sizeof(float)* 9000*9000);
hipMalloc((void **)&device_rowM, sizeof(int)* 1000000);
hipMalloc((void **)&device_colM, sizeof(int)* 1000000);
hipMalloc((void **)&device_rowN, sizeof(int)* 1000000);
hipMalloc((void **)&device_colN, sizeof(int)* 1000000);
hipMalloc((void **)&device_indexM, sizeof(int)* 1000000);
hipMalloc((void **)&device_indexN, sizeof(int)* 1000000);
hipMalloc((void **)&device_lenM, sizeof(int)* 1000000);
hipMalloc((void **)&device_lenN, sizeof(int)* 1000000);
hipMalloc((void **)&device_valueM, sizeof(float)* 1000000);
hipMalloc((void **)&device_valueN, sizeof(float)* 1000000);
hipMalloc((void **)&device_size, sizeof(int));
//hipMemcpy(device_C, C, sizeof(float)* N, hipMemcpyHostToDevice);
hipMemcpy(device_rowM, rowM, sizeof(int)* 1000000, hipMemcpyHostToDevice);
hipMemcpy(device_colM, colM, sizeof(int)* 1000000, hipMemcpyHostToDevice);
hipMemcpy(device_rowN, rowN, sizeof(int)* 1000000, hipMemcpyHostToDevice);
hipMemcpy(device_colN, colN, sizeof(int)* 1000000, hipMemcpyHostToDevice);
hipMemcpy(device_indexM, indexM, sizeof(int)*(Msize1+1), hipMemcpyHostToDevice);
hipMemcpy(device_indexN, indexN, sizeof(int)*(Nsize2+1), hipMemcpyHostToDevice);
hipMemcpy(device_lenM, lenM, sizeof(int)*(Msize1+1), hipMemcpyHostToDevice);
hipMemcpy(device_lenN, lenN, sizeof(int)*(Nsize2+1), hipMemcpyHostToDevice);
hipMemcpy(device_valueM, valueM, sizeof(float)* 1000000, hipMemcpyHostToDevice);
hipMemcpy(device_valueN, valueN, sizeof(float)* 1000000, hipMemcpyHostToDevice);
hipMemcpy(device_size, &Msize1, sizeof(int), hipMemcpyHostToDevice);
//int max = -1;
dim3 threadsPerBlock(thread_size, thread_size);
dim3 numBlocks(4, 4);
hipLaunchKernelGGL(( cudaMat) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, device_valueM, device_valueN,
device_rowM, device_rowN,
device_colM, device_colN,
device_indexM, device_indexN,
device_lenM, device_lenN,
device_C, device_size);
hipMemcpy(C, device_C, sizeof(float)* 9000*9000, hipMemcpyDeviceToHost);
tend = clock();
cout << "serial elapsed: " << float(tend-start)/CLOCKS_PER_SEC << "s" << endl;
return 0;
}
| 280d31c46498a4b66516a796dbe16fa06b730bd4.cu | #include<iostream>
#include<fstream>
#include<algorithm>
#include<string.h>
#include<string>
#include<ctime>
#include<cstdlib>
#include<cuda_runtime_api.h>
#include<device_launch_parameters.h>
#define block_size 4
#define thread_size 32
using namespace std;
//const int total_threads = block_size*block_size*thread_size*thread_size;
typedef struct {
int row, col;
float value;
}Triple;
Triple M[1000000];
Triple N[1000000];
//Triple C[100000000];
float C[9000][9000];
int rowM[1000000];
int colM[1000000];
float valueM[1000000];
int rowN[1000000];
int colN[1000000];
float valueN[1000000];
bool cmp(Triple x, Triple y) {
if(x.row > y.row || ((x.row == y.row) && (x.col > y.col)))
return false;
return true;
}
__global__ void cudaMat(float valueM[], float valueN[],
int rowM[], int rowN[],
int colM[], int colN[],
int indexM[], int indexN[],
int lenM[], int lenN[],
float C[][9000], int size2[]
)
{
int size = size2[0];
size = 8191;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int my_rank = i*thread_size*block_size + j;
i = my_rank/2;
if(i >= size)
return;
if(indexM[i]==-1)
return;
int jstart = 0;
int jend = size/2;
if(my_rank%2) {
jstart = size/2;
jend = size;
}
for(int j=jstart; j<jend; j++) {
if(indexN[j]==-1)
continue;
int p=0;
int q=0;
float sum = 0.0;
int flag = 0;
while(p<lenM[i] && q<lenN[j]) {
if(colM[p+indexM[i]] == colN[q+indexN[j]]) {
flag = 1;
sum += valueM[p+indexM[i]] *valueN[q+indexN[j]];
++p;++q;
}
else if(colM[p+indexM[i]] < colN[q+indexN[j]]) {
++p;
}
else {
++q;
}
}
if(!flag)
continue;
C[i][j] = sum;
}
}
clock_t start, tend;
int main()
{
int Msize1, Msize2, Nsize1, Nsize2;
int numM, numN;
// read data
ifstream fin;
fin.open("matrix.mat", ios::in);
//fin.open("test.dat", ios::in);
string s;
fin >> s >> s >> s >> s >> s;
fin >> Msize1 >> Msize2 >> numM;
int cnt = 0;
int row, col;
float value;
while (!fin.eof()) {
fin >> row >> col >> value;
M[cnt].row = row;
M[cnt].col = col;
M[cnt].value = value;
++cnt;
}
fin.close();
fin.open("matrix12.mat", ios::in);
//fin.open("test2.dat", ios::in);
fin >> s >> s >> s >> s >> s;
fin >> Nsize1 >> Nsize2 >> numN;
cnt = 0;
while (!fin.eof()) {
fin >> row >> col >> value;
N[cnt].row = col;
N[cnt].col = row;
N[cnt].value = value;
++cnt;
}
fin.close();
start = clock();
sort(M, M+numM, cmp);
sort(N, N+numN, cmp);
int indexM[Msize1+1];
int indexN[Nsize2+1];
int lenM[Msize1+1];
int lenN[Nsize2+1];
memset(indexM, -1, (Msize1+1)*sizeof(int));
memset(indexN, -1, (Nsize2+1)*sizeof(int));
cnt = 0;
int k = -1;
for(int i=0; i<numM; i++) {
if(indexM[M[i].row] == -1) {
indexM[M[i].row] = i;
k = M[i].row;
lenM[k] = 1;
while(M[i+1].row == k) {
i++;
lenM[k]++;
}
}
}
for(int i=0; i<numM; i++) {
rowM[i] = M[i].row;
colM[i] = M[i].col;
valueM[i] = M[i].value;
}
for(int i=0; i<numN; i++) {
if(indexN[N[i].row] == -1) {
indexN[N[i].row] = i;
k = N[i].row;
lenN[k] = 1;
while(N[i+1].row == k) {
i++;
lenN[k]++;
}
}
}
for(int i=0; i<numN; i++) {
rowN[i] = N[i].row;
colN[i] = N[i].col;
valueN[i] = N[i].value;
}
cnt = 0;
// split row i into different process
int *device_rowM, *device_colM, *device_rowN, *device_colN, *device_indexM, *device_indexN, *device_lenM, *device_lenN;
float *device_valueM, *device_valueN;
float (*device_C)[9000];
int *device_size;
cudaMalloc((void **)&device_C, sizeof(float)* 9000*9000);
cudaMalloc((void **)&device_rowM, sizeof(int)* 1000000);
cudaMalloc((void **)&device_colM, sizeof(int)* 1000000);
cudaMalloc((void **)&device_rowN, sizeof(int)* 1000000);
cudaMalloc((void **)&device_colN, sizeof(int)* 1000000);
cudaMalloc((void **)&device_indexM, sizeof(int)* 1000000);
cudaMalloc((void **)&device_indexN, sizeof(int)* 1000000);
cudaMalloc((void **)&device_lenM, sizeof(int)* 1000000);
cudaMalloc((void **)&device_lenN, sizeof(int)* 1000000);
cudaMalloc((void **)&device_valueM, sizeof(float)* 1000000);
cudaMalloc((void **)&device_valueN, sizeof(float)* 1000000);
cudaMalloc((void **)&device_size, sizeof(int));
//cudaMemcpy(device_C, C, sizeof(float)* N, cudaMemcpyHostToDevice);
cudaMemcpy(device_rowM, rowM, sizeof(int)* 1000000, cudaMemcpyHostToDevice);
cudaMemcpy(device_colM, colM, sizeof(int)* 1000000, cudaMemcpyHostToDevice);
cudaMemcpy(device_rowN, rowN, sizeof(int)* 1000000, cudaMemcpyHostToDevice);
cudaMemcpy(device_colN, colN, sizeof(int)* 1000000, cudaMemcpyHostToDevice);
cudaMemcpy(device_indexM, indexM, sizeof(int)*(Msize1+1), cudaMemcpyHostToDevice);
cudaMemcpy(device_indexN, indexN, sizeof(int)*(Nsize2+1), cudaMemcpyHostToDevice);
cudaMemcpy(device_lenM, lenM, sizeof(int)*(Msize1+1), cudaMemcpyHostToDevice);
cudaMemcpy(device_lenN, lenN, sizeof(int)*(Nsize2+1), cudaMemcpyHostToDevice);
cudaMemcpy(device_valueM, valueM, sizeof(float)* 1000000, cudaMemcpyHostToDevice);
cudaMemcpy(device_valueN, valueN, sizeof(float)* 1000000, cudaMemcpyHostToDevice);
cudaMemcpy(device_size, &Msize1, sizeof(int), cudaMemcpyHostToDevice);
//int max = -1;
dim3 threadsPerBlock(thread_size, thread_size);
dim3 numBlocks(4, 4);
cudaMat <<< numBlocks, threadsPerBlock >>>(device_valueM, device_valueN,
device_rowM, device_rowN,
device_colM, device_colN,
device_indexM, device_indexN,
device_lenM, device_lenN,
device_C, device_size);
cudaMemcpy(C, device_C, sizeof(float)* 9000*9000, cudaMemcpyDeviceToHost);
tend = clock();
cout << "serial elapsed: " << float(tend-start)/CLOCKS_PER_SEC << "s" << endl;
return 0;
}
|
3c266a2822d576d401c53fb31cebc99d821722ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/device/device_context.h"
#include "oneflow/core/framework/random_generator.h"
#include "oneflow/user/kernels/range_kernel_util.h"
#include "oneflow/user/kernels/distributions/uniform_kernel.h"
#include "oneflow/user/kernels/radix_sort.cuh"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace oneflow {
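// Sort-by-random-key shuffle: every index i is paired with a random 32-bit key,
// and radix-sorting the (key, index) pairs later in Compute() leaves a random
// permutation of 0..n-1 (up to duplicate-key ties) in the value array.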
__global__ void GeneKeysAndValues(const int32_t n, int32_t* values, int32_t* keys,
hiprandState_t* state) {
XPU_1D_KERNEL_LOOP(i, n) {
keys[i] = hiprand(state + i);
values[i] = i;
}
}
class GpuRandPermKernel final : public user_op::OpKernel {
public:
GpuRandPermKernel() = default;
~GpuRandPermKernel() = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
const auto& generator = CHECK_JUST(one::MakeAutoGenerator());
generator->set_current_seed(ctx->Attr<int64_t>("seed"));
return std::make_shared<UniformKernelState>(generator);
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int32_t* output = out->mut_dptr<int32_t>();
const int32_t n = ctx->Attr<int32_t>("n");
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
auto* randperm_kernel_state = dynamic_cast<UniformKernelState*>(state);
CHECK_NOTNULL(randperm_kernel_state);
const auto& generator = randperm_kernel_state->generator();
const auto& gpu_generator = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
CHECK_NOTNULL(generator);
int32_t block_num = gpu_generator->max_block_num();
int32_t thread_num = gpu_generator->max_thread_num();
hiprandState_t* curand_states = gpu_generator->curand_states();
// layout for tmp |...key(in and out,2xN)..|....value....|.... space for sort function....|
// values are the desired indexes, and keys are generated randomly.
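// Concretely: the keys occupy the first 2 * GetCudaAlignedSize(n * sizeof(int32_t)) bytes
// (unsorted + sorted halves), the index values take the next aligned n * sizeof(int32_t)
// bytes, and the remainder is the scratch space requested by the radix sort's SortPairs.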
void* tmp = tmp_buffer->mut_dptr<void>();
int32_t* key_base = reinterpret_cast<int32_t*>(tmp);
const int32_t key_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
int32_t* value_base =
reinterpret_cast<int32_t*>(reinterpret_cast<char*>(key_base) + 2 * key_aligned_bytes);
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
void* tmp_base =
reinterpret_cast<void*>(reinterpret_cast<char*>(value_base) + indices_aligned_bytes);
size_t temp_storage_bytes = InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
hipLaunchKernelGGL(( GeneKeysAndValues), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(), n, value_base, key_base, curand_states);
auto err = hipcub::DeviceRadixSort::SortPairs(
/* d_temp_storage */ tmp_base,
/* temp_storage_bytes */ temp_storage_bytes,
/* d_keys_in */ key_base,
/* d_keys_out */ key_base + n,
/* d_values_in */ value_base,
/* d_values_out */ output,
/* num_items */ n,
/* begin_bit */ 0,
/* end_bit */ sizeof(int32_t) * 8,
/* stream */ ctx->device_ctx()->cuda_stream());
OF_CUDA_CHECK(err);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("randperm")
.SetCreateFn<GpuRandPermKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == "gpu")
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int32_t n = ctx->Attr<int32_t>("n");
/* Sorted In */
const int32_t sorted_in_aligned_bytes = 2 * GetCudaAlignedSize(n * sizeof(int32_t));
/* Indices */
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
/* CUB Temp Storage */
const int32_t temp_storage_bytes =
InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
return sorted_in_aligned_bytes + indices_aligned_bytes + temp_storage_bytes;
});
} // namespace oneflow
| 3c266a2822d576d401c53fb31cebc99d821722ce.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/device/device_context.h"
#include "oneflow/core/framework/random_generator.h"
#include "oneflow/user/kernels/range_kernel_util.h"
#include "oneflow/user/kernels/distributions/uniform_kernel.h"
#include "oneflow/user/kernels/radix_sort.cuh"
#include <curand.h>
#include <curand_kernel.h>
namespace oneflow {
__global__ void GeneKeysAndValues(const int32_t n, int32_t* values, int32_t* keys,
curandState* state) {
XPU_1D_KERNEL_LOOP(i, n) {
keys[i] = curand(state + i);
values[i] = i;
}
}
class GpuRandPermKernel final : public user_op::OpKernel {
public:
GpuRandPermKernel() = default;
~GpuRandPermKernel() = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
const auto& generator = CHECK_JUST(one::MakeAutoGenerator());
generator->set_current_seed(ctx->Attr<int64_t>("seed"));
return std::make_shared<UniformKernelState>(generator);
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int32_t* output = out->mut_dptr<int32_t>();
const int32_t n = ctx->Attr<int32_t>("n");
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
auto* randperm_kernel_state = dynamic_cast<UniformKernelState*>(state);
CHECK_NOTNULL(randperm_kernel_state);
const auto& generator = randperm_kernel_state->generator();
const auto& gpu_generator = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
CHECK_NOTNULL(generator);
int32_t block_num = gpu_generator->max_block_num();
int32_t thread_num = gpu_generator->max_thread_num();
curandState* curand_states = gpu_generator->curand_states();
// layout for tmp |...key(in and out,2xN)..|....value....|.... space for sort function....|
// values are the desired indexes, and keys are generated randomly.
void* tmp = tmp_buffer->mut_dptr<void>();
int32_t* key_base = reinterpret_cast<int32_t*>(tmp);
const int32_t key_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
int32_t* value_base =
reinterpret_cast<int32_t*>(reinterpret_cast<char*>(key_base) + 2 * key_aligned_bytes);
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
void* tmp_base =
reinterpret_cast<void*>(reinterpret_cast<char*>(value_base) + indices_aligned_bytes);
size_t temp_storage_bytes = InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
GeneKeysAndValues<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(n, value_base, key_base, curand_states);
auto err = cub::DeviceRadixSort::SortPairs(
/* d_temp_storage */ tmp_base,
/* temp_storage_bytes */ temp_storage_bytes,
/* d_keys_in */ key_base,
/* d_keys_out */ key_base + n,
/* d_values_in */ value_base,
/* d_values_out */ output,
/* num_items */ n,
/* begin_bit */ 0,
/* end_bit */ sizeof(int32_t) * 8,
/* stream */ ctx->device_ctx()->cuda_stream());
OF_CUDA_CHECK(err);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("randperm")
.SetCreateFn<GpuRandPermKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == "gpu")
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int32_t n = ctx->Attr<int32_t>("n");
/* Sorted In */
const int32_t sorted_in_aligned_bytes = 2 * GetCudaAlignedSize(n * sizeof(int32_t));
/* Indices */
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
/* CUB Temp Storage */
const int32_t temp_storage_bytes =
InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
return sorted_in_aligned_bytes + indices_aligned_bytes + temp_storage_bytes;
});
} // namespace oneflow
|
3189817b5a0704389a1dfe45e1344d46f19b463b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
// Pvalue is used to store the element of the output matrix
// that is computed by the thread
float Pvalue = 0;
for (int k = 0; k < ncols; ++k) {
float Melement = Md[row*ncols+k];
float Nelement = Nd[k*ncols+col];
Pvalue += Melement * Nelement;
}
Pd[row*ncols+col] = Pvalue;
} | 3189817b5a0704389a1dfe45e1344d46f19b463b.cu | #include "includes.h"
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
// Pvalue is used to store the element of the output matrix
// that is computed by the thread
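// Each thread writes exactly one output element Pd[row][col]. A minimal launch
// sketch for square ncols x ncols matrices (assuming, purely as an example, that
// ncols is a multiple of a 16x16 block):
//   dim3 block(16, 16);
//   dim3 grid(ncols / 16, ncols / 16);
//   MatrixMulKernel<<<grid, block>>>(Md, Nd, Pd, ncols);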
float Pvalue = 0;
for (int k = 0; k < ncols; ++k) {
float Melement = Md[row*ncols+k];
float Nelement = Nd[k*ncols+col];
Pvalue += Melement * Nelement;
}
Pd[row*ncols+col] = Pvalue;
} |
9fc7d0614515c74a304f71c11cacb6861425a04b.hip | // !!! This is a file automatically generated by hipify!!!
#pragma comment (lib, "cublas.lib")
#pragma comment (lib, "hiprand.lib")
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <stdlib.h>
#include "time.h"
#include <rocblas.h>
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(prng, time(0));
hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) {
int lda = m, ldb = k, ldc = m;
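// hipBLAS/cuBLAS assume column-major storage: with lda = m, ldb = k, ldc = m the
// Sgemm call below computes C = alpha * A * B + beta * C for A (m x k), B (k x n)
// and C (m x n).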
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
hipblasDestroy(handle);
}
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
for (int i = 0; i < nr_rows_A; ++i) {
for (int j = 0; j < nr_cols_A; ++j) {
printf("%f ", A[j * nr_rows_A + i]);
}
printf("\n");
}
printf("\n");
}
int main() {
int m, k, n;
m = 10000;
k = 10000;
n = 10000;
float *h_A = (float *) malloc(m * k * sizeof(float));
float *h_B = (float *) malloc(n * k * sizeof(float));
float *h_C = (float *) malloc(m * n * sizeof(float));
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, m * k * sizeof(float));
hipMalloc(&d_B, n * k * sizeof(float));
hipMalloc(&d_C, m * n * sizeof(float));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
GPU_fill_rand(d_A, m, k);
GPU_fill_rand(d_B, n, k);
hipMemcpy(h_A, d_A, m * k * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_B, n * k * sizeof(float), hipMemcpyDeviceToHost);
// printf("A:\n");
// print_matrix(h_A, m, k);
// printf("B:\n");
// print_matrix(h_B, n, k);
gpu_blas_mmul(d_A, d_B, d_C, m, k, k);
hipMemcpy(h_C, d_C, m * n * sizeof(float), hipMemcpyDeviceToHost);
// printf("C:\n");
// print_matrix(h_C, m, n);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("The time is %.6f ms\n", milliseconds);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
// nvcc -o lab12_3 lab_12_3.cu -lcurand -lcublas
| 9fc7d0614515c74a304f71c11cacb6861425a04b.cu | #pragma comment (lib, "cublas.lib")
#pragma comment (lib, "curand.lib")
#include <cstdlib>
#include <curand.h>
#include <stdio.h>
#include <stdlib.h>
#include "time.h"
#include <cublas_v2.h>
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(prng, time(0));
curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) {
int lda = m, ldb = k, ldc = m;
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
cublasHandle_t handle;
cublasCreate(&handle);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
cublasDestroy(handle);
}
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
for (int i = 0; i < nr_rows_A; ++i) {
for (int j = 0; j < nr_cols_A; ++j) {
printf("%f ", A[j * nr_rows_A + i]);
}
printf("\n");
}
printf("\n");
}
int main() {
int m, k, n;
m = 10000;
k = 10000;
n = 10000;
float *h_A = (float *) malloc(m * k * sizeof(float));
float *h_B = (float *) malloc(n * k * sizeof(float));
float *h_C = (float *) malloc(m * n * sizeof(float));
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, m * k * sizeof(float));
cudaMalloc(&d_B, n * k * sizeof(float));
cudaMalloc(&d_C, m * n * sizeof(float));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
GPU_fill_rand(d_A, m, k);
GPU_fill_rand(d_B, n, k);
cudaMemcpy(h_A, d_A, m * k * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_B, n * k * sizeof(float), cudaMemcpyDeviceToHost);
// printf("A:\n");
// print_matrix(h_A, m, k);
// printf("B:\n");
// print_matrix(h_B, n, k);
gpu_blas_mmul(d_A, d_B, d_C, m, k, k);
cudaMemcpy(h_C, d_C, m * n * sizeof(float), cudaMemcpyDeviceToHost);
// printf("C:\n");
// print_matrix(h_C, m, n);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("The time is %.6f ms\n", milliseconds);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
// nvcc -o lab12_3 lab_12_3.cu -lcurand -lcublas
|
7a3943443a47a556444758b7ef6b00ccd06e9501.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 256
__global__ void add(int *a, int *b, int *c);
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int i;
// allocate space for device copies
hipMalloc(&d_a, N*sizeof(int));
hipMalloc(&d_b, N*sizeof(int));
hipMalloc(&d_c, N*sizeof(int));
// allocate variables
a = (int *)malloc(N*sizeof(int));
b = (int *)malloc(N*sizeof(int));
c = (int *)malloc(N*sizeof(int));
// attribute values to arrays
for(i = 0; i < N; i++)
{
a[i] = i;
b[i] = i;
}
// copy inputs to device
hipMemcpy(d_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, N*sizeof(int), hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, d_a, d_b, d_c);
// copy result back to Host
hipMemcpy(c, d_c, N*sizeof(int), hipMemcpyDeviceToHost);
for(i = 0; i < N; i++)
printf("c[%d] = %d\n", i + 1, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
__global__ void add(int *a, int *b, int *c)
{
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
| 7a3943443a47a556444758b7ef6b00ccd06e9501.cu | #include <stdio.h>
#define N 256
__global__ void add(int *a, int *b, int *c);
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int i;
// allocate space for device copies
cudaMalloc(&d_a, N*sizeof(int));
cudaMalloc(&d_b, N*sizeof(int));
cudaMalloc(&d_c, N*sizeof(int));
// allocate variables
a = (int *)malloc(N*sizeof(int));
b = (int *)malloc(N*sizeof(int));
c = (int *)malloc(N*sizeof(int));
// attribute values to arrays
for(i = 0; i < N; i++)
{
a[i] = i;
b[i] = i;
}
// copy inputs to device
cudaMemcpy(d_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,N>>>(d_a, d_b, d_c);
// copy result back to Host
cudaMemcpy(c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for(i = 0; i < N; i++)
printf("c[%d] = %d\n", i + 1, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
__global__ void add(int *a, int *b, int *c)
{
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
|
0ccc4f6f16d10d50ff927aeb55f9fc13524c647e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Author: Daniel LIndberg
#include <stdio.h>
#include "../include/gputimer.h"
//#include "utils.h"
const int N = 1024; // matrix size will be NxN
const int TILE_SIZE = 16; // For tiled transposition method using shared memory
int compare_matrices(float *gpu, float *ref, int N)
{
int result = 0;
for(int j=0; j < N; j++) {
for(int i=0; i < N; i++) {
if (ref[i + j*N] != gpu[i + j*N]) {
result = 1;
}
}
}
return result;
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat, int N)
{
for(int j=0; j < N * N; j++) {
mat[j] = (float) j;
}
}
// The following functions and kernels are for your references
void transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++) {
for(int i=0; i < N; i++) {
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
}
}
// to be launched on a single thread
__global__ void transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++) {
for(int i=0; i < N; i++) {
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
}
}
// to be launched with one thread per row of output matrix
__global__ void transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
for(int j=0; j < N; j++) {
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
}
// Write two tiled versions of transpose -- One using shared memory.
// To be launched with one thread per element, in KxK threadblocks.
// You will determine for each thread (x,y) in tile the element (i,j) of global output matrix.
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
//TODO
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
out[j + i * N] = in[i + j * N];
}
__global__ void
transpose_parallel_per_element_tiled_shared(float in[], float out[])
{
//TODO
__shared__ float cache[32 * 32];
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
// Identify our location in shared memory and copy corresponding data over from global
int sharedT = threadIdx.y + threadIdx.x * blockDim.x; // T for transposed value
int sharedO = threadIdx.x + threadIdx.y * blockDim.y; // O for original value
cache[sharedO] = in[i + j * N];
// Wait for all threads to copy from global to shared memory
__syncthreads();
int tVal = (threadIdx.x + blockIdx.y * blockDim.y) + (threadIdx.y + blockIdx.x * blockDim.x) * N; // Transpose value
out[tVal] = cache[sharedT]; // Finish by copying the transpose value from shared memory to global memory
}
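// Note on the shared-memory version above: both the global read of in[] and the global
// write of out[] vary threadIdx.x fastest along a row, so they should stay coalesced;
// the transposition itself happens through the shared-memory tile (written via sharedO,
// read back via sharedT after __syncthreads()).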
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in, N);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying ...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
} //clean out
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying ...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
// Tiled versions
const int K = TILE_SIZE;
dim3 blocks_tiled(N/K,N/K);
dim3 threads_tiled(K,K);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks_tiled),dim3(threads_tiled), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
dim3 blocks_tiled_sh(N/K,N/K);
dim3 threads_tiled_sh(K,K);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled_shared), dim3(blocks_tiled_sh),dim3(threads_tiled_sh), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_shared %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipFree(d_in);
hipFree(d_out);
}
| 0ccc4f6f16d10d50ff927aeb55f9fc13524c647e.cu | //Author: Daniel LIndberg
#include <stdio.h>
#include "../include/gputimer.h"
//#include "utils.h"
const int N = 1024; // matrix size will be NxN
const int TILE_SIZE = 16; // For tiled transposition method using shared memory
int compare_matrices(float *gpu, float *ref, int N)
{
int result = 0;
for(int j=0; j < N; j++) {
for(int i=0; i < N; i++) {
if (ref[i + j*N] != gpu[i + j*N]) {
result = 1;
}
}
}
return result;
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat, int N)
{
for(int j=0; j < N * N; j++) {
mat[j] = (float) j;
}
}
// The following functions and kernels are for your references
void transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++) {
for(int i=0; i < N; i++) {
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
}
}
// to be launched on a single thread
__global__ void transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++) {
for(int i=0; i < N; i++) {
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
}
}
// to be launched with one thread per row of output matrix
__global__ void transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
for(int j=0; j < N; j++) {
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
}
// Write two tiled versions of transpose -- One using shared memory.
// To be launched with one thread per element, in KxK threadblocks.
// You will determine for each thread (x,y) in tile the element (i,j) of global output matrix.
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
//TODO
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
out[j + i * N] = in[i + j * N];
}
__global__ void
transpose_parallel_per_element_tiled_shared(float in[], float out[])
{
//TODO
__shared__ float cache[32 * 32];
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
// Identify our location in shared memory and copy corresponding data over from global
int sharedT = threadIdx.y + threadIdx.x * blockDim.x; // T for transposed value
int sharedO = threadIdx.x + threadIdx.y * blockDim.y; // O for original value
cache[sharedO] = in[i + j * N];
// Wait for all threads to copy from global to shared memory
__syncthreads();
int tVal = (threadIdx.x + blockIdx.y * blockDim.y) + (threadIdx.y + blockIdx.x * blockDim.x) * N; // Transpose value
out[tVal] = cache[sharedT]; // Finish by copying the transpose value from shared memory to global memory
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in, N);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying ...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
} //clean out
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying ...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
// Tiled versions
const int K = TILE_SIZE;
dim3 blocks_tiled(N/K,N/K);
dim3 threads_tiled(K,K);
timer.Start();
transpose_parallel_per_element_tiled<<<blocks_tiled,threads_tiled>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
dim3 blocks_tiled_sh(N/K,N/K);
dim3 threads_tiled_sh(K,K);
timer.Start();
transpose_parallel_per_element_tiled_shared<<<blocks_tiled_sh,threads_tiled_sh>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i) {
out[i] = 0.0;
}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_shared %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaFree(d_in);
cudaFree(d_out);
}
|
particleSystem_hip.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around the some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel_impl.cuh"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaGLDevice(argc, (const char **)argv);
}
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(hipMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
checkCudaErrors(hipFree(devPtr));
}
void threadSync()
{
checkCudaErrors(hipDeviceSynchronize());
}
void copyArrayToDevice(void *device, const void *host, int offset, int size)
{
checkCudaErrors(hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
hipGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
checkCudaErrors(hipGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void *host, const void *device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
{
device = mapGLBufferObject(cuda_vbo_resource);
}
checkCudaErrors(hipMemcpy(host, device, size, hipMemcpyDeviceToHost));
if (cuda_vbo_resource)
{
unmapGLBufferObject(*cuda_vbo_resource);
}
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
checkCudaErrors(hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)));
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
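// e.g. iDivUp(1000, 256) == 4 and iDivUp(1024, 256) == 4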
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
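// integrateSystem runs entirely on the device: the zip iterator pairs each position
// float4 with its velocity float4, and thrust::for_each applies integrate_functor(deltaTime)
// to every (pos, vel) pair in a single pass.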
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
thrust::device_ptr<float4> d_pos4((float4 *)pos);
thrust::device_ptr<float4> d_vel4((float4 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_vel4)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos4+numParticles, d_vel4+numParticles)),
integrate_functor(deltaTime));
}
void calcHash(uint *gridParticleHash,
uint *gridParticleIndex,
float *pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads) , 0, 0, gridParticleHash,
gridParticleIndex,
(float4 *) pos,
numParticles);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint *cellStart,
uint *cellEnd,
float *sortedPos,
float *sortedVel,
uint *gridParticleHash,
uint *gridParticleIndex,
float *oldPos,
float *oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
checkCudaErrors(hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
checkCudaErrors(hipBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
checkCudaErrors(hipBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
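// numThreads + 1 hashes fit in shared memory: in the standard particles-sample kernel each
// thread stores its own particle's hash and also reads the previous particle's hash (the
// extra slot) to detect where a new cell starts.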
hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
cellStart,
cellEnd,
(float4 *) sortedPos,
(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
(float4 *) oldVel,
numParticles);
getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
checkCudaErrors(hipUnbindTexture(oldPosTex));
checkCudaErrors(hipUnbindTexture(oldVelTex));
#endif
}
void collide(float *newVel,
float *sortedPos,
float *sortedVel,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells)
{
#if USE_TEX
checkCudaErrors(hipBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
checkCudaErrors(hipBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
checkCudaErrors(hipBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
checkCudaErrors(hipBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( collideD), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4 *)newVel,
(float4 *)sortedPos,
(float4 *)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
#if USE_TEX
checkCudaErrors(hipUnbindTexture(oldPosTex));
checkCudaErrors(hipUnbindTexture(oldVelTex));
checkCudaErrors(hipUnbindTexture(cellStartTex));
checkCudaErrors(hipUnbindTexture(cellEndTex));
#endif
}
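// sortParticles: thrust::sort_by_key orders the cell-hash keys and permutes the
// particle-index array in lockstep, producing the gather order that
// reorderDataAndFindCellStart consumes.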
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
} // extern "C"
| particleSystem_hip.cuh | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
extern "C"
{
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles);
void scanParticles(uint *dInput, uint *dOutput, uint count);
void cudaInit(int argc, char **argv);
void allocateArray(void **devPtr, int size);
void freeArray(void *devPtr);
void threadSync();
void copyArrayFromDevice(void* host, const void* device, unsigned int vbo, int size);
void copyArrayToDevice(void* device, const void* host, int offset, int size);
void registerGLBufferObject(unsigned int vbo);
void unregisterGLBufferObject(unsigned int vbo);
void *mapGLBufferObject(uint vbo);
void unmapGLBufferObject(uint vbo);
void setParameters(SimParams *hostParams);
void setRSParameters(RSParams *hostParams);
void calcHash(uint* gridParticleHash,
uint* gridParticleIndex,
float* pos,
int numParticles);
void reorderDataAndFindCellStart(uint* cellStart,
uint* cellEnd,
float* sortedPos,
uint* gridParticleHash,
uint* gridParticleIndex,
float* oldPos,
uint numParticles,
uint numCells);
void countNeighbors( uint* neighborCount,
uint* neighbors,
float* smallCircles,
float* sortedPos,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numAtoms,
uint numNeighbors,
uint numCells);
void countNeighbors2( uint* neighborCount,
uint* neighbors,
float* sortedPos,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numAtoms,
uint numNeighbors,
uint numCells);
void countProbeNeighbors( //uint* probeNeighborCount,
float3* probeNeighborCount,
//uint* probeNeighbors,
float3* probeNeighbors,
float* sortedProbePos,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numProbes,
uint numNeighbors,
uint numCells);
void computeArcsCUDA( float* arcs,
uint* neighborCount,
uint* neighbors,
float* smallCircles,
float* sortedPos,
uint* gridParticleIndex,
uint numAtoms,
uint numNeighbors);
void computeReducedSurfaceCuda(
uint* point1,
//float* point2,
//float* point3,
float* probePos,
uint* neighborCount,
uint* neighbors,
float* atomPos,
uint* gridParticleIndex,
float* visibleAtoms,
uint* visibleAtomsId,
uint numAtoms,
uint numVisibleAtoms,
uint numNeighbors);
void computeTriangleVBOCuda(
float3* vbo,
uint* point1,
//float* point2,
//float* point3,
float* atomPos,
float* visibleAtoms,
uint numAtoms,
uint numVisibleAtoms,
uint numNeighbors,
uint offset);
void computeVisibleTriangleVBOCuda(
float3* vbo,
uint* point1,
hipArray* visiblity,
float* atomPos,
float* visibleAtoms,
uint numAtoms,
uint numVisibleAtoms,
uint numNeighbors,
uint offset);
void computeSESPrimiticesVBOCuda(
float4* outTorusVBO,
float4* outSTriaVBO,
float4* inVBO,
float* atomPos,
float* visibleAtoms,
uint* point1,
float* probePos,
uint numAtoms,
uint numVisibleAtoms,
uint numNeighbors,
uint numVisibleTria);
void writeProbePositionsCuda(
float* probePos,
float4* sTriaVbo,
uint numProbes);
void writeSingularitiesCuda(
float3* outArray,
uint* probeNeighbors,
float* probePos,
uint numProbes,
uint numNeighbors );
void findAdjacentTrianglesCuda(
float* outPbo,
hipArray* visibility,
uint* point1,
float* probePos,
uint* neighborCount,
uint* neighbors,
float* atomPos,
float* visibleAtoms,
uint* visibleAtomsId,
uint numAtoms,
uint numVisibleAtoms,
uint numNeighbors);
void findNeighborsCB(
uint* neighborCount,
uint* neighbors,
float* smallCircles,
float* sortedPos,
uint* cellStart,
uint* cellEnd,
uint numAtoms,
uint numNeighbors,
uint numCells);
void removeCoveredSmallCirclesCB(
float* smallCircles,
uint* smallCircleVisible,
uint* neighborCount,
uint* neighbors,
float* sortedPos,
uint numAtoms,
uint numNeighbors);
void computeArcsCB(
float* smallCircles,
uint* smallCircleVisible,
uint* neighborCount,
uint* neighbors,
float* sortedPos,
float* arcs,
uint* arcCount,
uint numAtoms,
uint numNeighbors);
void writeProbePositionsCB(
float* probePos,
float* sphereTriaVec1,
float* sphereTriaVec2,
float* sphereTriaVec3,
float* torusPos,
float* torusVS,
float* torusAxis,
uint* neighborCount,
uint* neighbors,
float* sortedAtomPos,
float* arcs,
uint* arcCount,
uint* arcCountScan,
uint* scCount,
uint* scCountScan,
float* smallCircles,
uint numAtoms,
uint numNeighbors);
void writeSingularityTextureCB(
float* texCoord,
float* singTex,
float* sortedProbePos,
uint* gridProbeIndex,
uint* cellStart,
uint* cellEnd,
uint numProbes,
uint numNeighbors,
uint numCells);
}
|
4d0ccc5e419d41d96a872f33c2c96719d18b4947.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream> // std::cout
#include <algorithm> // std::sort
#include <vector> // std::vector
#include <time.h>
using namespace std;
// Parameter selecting pinned host memory (1) or pageable host memory (0)
#define PINNED 0
// Parameter setting the number of threads per block for the calculateDistance kernel launch
#define THREADS 1024
//------------------------------------------------------------------------------------------------------------------------------------------
//FUNCTIONS BORROWED FROM OUR CLASSMATES' CODE TO PERFORM THE MERGESORT WITH CUDA
__device__ void mergeDevice(float *list, float *sorted, float *list2, float *sorted2, int start, int mid, int end)
{
int ti=start, i=start, j=mid;
while (i<mid || j<end)
{
if (j==end) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else if (i==mid) {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
else if (list[i]<list[j]) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
ti++;
}
for (ti=start; ti<end; ti++) {
list[ti] = sorted[ti];
list2[ti] = sorted2[ti];
}
}
void mergeHost(float *list, float *sorted, float *list2, float *sorted2, int start, int mid, int end)
{
int ti=start, i=start, j=mid;
while (i<mid || j<end)
{
if (j==end) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else if (i==mid) {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
else if (list[i]<list[j]) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
ti++;
}
for (ti=start; ti<end; ti++) {
list[ti] = sorted[ti];
list2[ti] = sorted2[ti];
}
}
__device__ void mergeSortKernel(float *list, float *sorted, float *list2, float *sorted2, int start, int end)
{
//Base case 1: there are more threads than elements in the vector
if (end-start<2)
return;
mergeSortKernel(list, sorted, list2, sorted2, start, start + (end-start)/2);
mergeSortKernel(list, sorted, list2, sorted2, start + (end-start)/2, end);
mergeDevice(list, sorted, list2, sorted2, start, start + (end-start)/2, end);
}
__global__ void callMerge(float *list, float *sorted, float *list2, float *sorted2, int chunkSize, int N) {
if (chunkSize >= N)
return;
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int start = tid*chunkSize;
int end = start + chunkSize;
if (end > N) {
end = N;
}
mergeDevice(list, sorted, list2, sorted2, start, start + (end-start)/2, end);
}
__global__ void callMergeSort(float *list, float *sorted, float *list2, float *sorted2, int chunkSize, int N) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int start = tid*chunkSize;
int end = start + chunkSize;
if (end > N) {
end = N;
}
mergeSortKernel(list, sorted, list2, sorted2, start, end);
}
void sortBlocks(float *list, float *sorted, float *list2, float *sorted2, int N) {
int chunkSize = N;
int start = 0;
int end = chunkSize;
int mid = (start+end)/2;
mergeHost(list, sorted, list2, sorted2, start, mid, end);
}
//------------------------------------------------------------------------------------------------------------------------------------------
//FUNCTIONS USED IN THE SEQUENTIAL VERSION TO SORT WITH QUICKSORT
// A utility function to swap two elements
void swap(float* a, float* b, float* c, float* d)
{
float t = *a;
float t2 = *c;
*a = *b;
*c = *d;
*b = t;
*d = t2;
}
/* This function takes last element as pivot, places
the pivot element at its correct position in sorted
array, and places all smaller (smaller than pivot)
to left of pivot and all greater elements to right
of pivot */
int partition (float *result_prediction_host, float *ref_points_host_val, int low, int high)
{
float pivot = result_prediction_host[high]; // pivot
int i = (low - 1); // Index of smaller element
for (int j = low; j <= high- 1; j++)
{
// If current element is smaller than or
// equal to pivot
if (result_prediction_host[j] <= pivot)
{
i++; // increment index of smaller element
swap(&result_prediction_host[i], &result_prediction_host[j], &ref_points_host_val[i], &ref_points_host_val[j]);
}
}
swap(&result_prediction_host[i + 1], &result_prediction_host[high], &ref_points_host_val[i + 1], &ref_points_host_val[high]);
return (i + 1);
}
/* The main function that implements QuickSort
arr[] --> Array to be sorted,
low --> Starting index,
high --> Ending index */
void quickSort(float *result_prediction_host, float *ref_points_host_val, int low, int high)
{
if (low < high)
{
/* pi is partitioning index, arr[p] is now
at right place */
int pi = partition(result_prediction_host, ref_points_host_val, low, high);
// Separately sort elements before
// partition and after partition
quickSort(result_prediction_host, ref_points_host_val, low, pi - 1);
quickSort(result_prediction_host, ref_points_host_val, pi + 1, high);
}
}
//------------------------------------------------------------------------------------------------------------------------------------------
//Estructura utilitzada per representar un punt de coordenades
struct Point
{
float x, y; // Co-ordinate of point
};
/** Function called to run our knn algorithm sequentially.
* @param arr reference points
* @param n number of reference points
* @param k number of points we want to use for the prediction
* @param p point we want to predict
*/
int classifyAPoint(Point arr[], int n, int k, Point p, float val[])
{
float distances[n];
// Fill distances of all points from p
for (int i = 0; i < n; i++)
distances[i] =
sqrt((arr[i].x - p.x) * (arr[i].x - p.x) +
(arr[i].y - p.y) * (arr[i].y - p.y));
// Sort the Points by distance from p
quickSort(distances, val, 0, n-1);
// Now consider the first k elements and only two groups.
int freq1 = 0; // Frequency of group 0
int freq2 = 0; // Frequency of group 1
for (int i = 0; i < k; i++)
{
if (val[i] == 0)
freq1++;
else if (val[i] == 1)
freq2++;
}
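// Majority vote over the k nearest labels. For example, with k = 3 and nearest
// labels {0, 1, 1}: freq1 = 1, freq2 = 2, so the point is assigned to group 1.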
printf ("freq1 is %d.\n", freq1);
printf ("freq2 is %d.\n", freq2);
return (freq1 > freq2 ? 0 : 1);
}
//Function that lays out and initializes the host arrays needed for the device computation (coordinates and the 0/1 group value).
void InitHostInput(Point arr[], int n, float val[], Point p, float *ref_points_host_x, float *ref_points_host_y, float *ref_points_host_val) {
for (int i=0; i<n; i++) {
ref_points_host_x[i] = arr[i].x;
ref_points_host_y[i] = arr[i].y;
ref_points_host_val[i] = val[i];
}
}
//Function that initializes the host frequency counters to 0.
void InitHostFreq(unsigned int *freq_host) {
freq_host[0] = 0;
freq_host[1] = 0;
}
//Kernel that computes the Euclidean distance between a given point (p) and every reference point.
__global__ void calculateDistance(Point p, float *ref_points_dev_x, float *ref_points_dev_y, float *result_prediction_dev) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Fill distances of all points from p
result_prediction_dev[i] =
sqrt((ref_points_dev_x[i] - p.x) * (ref_points_dev_x[i] - p.x) +
(ref_points_dev_y[i] - p.y) * (ref_points_dev_y[i] - p.y));
}
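// One thread per reference point. The grid is rounded up to a multiple of THREADS and
// there is no bounds check on i, so the buffers passed in must cover the full padded
// launch (nBlocks * nThreads floats).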
//Kernel that computes the frequencies of the values 0 and 1.
__global__ void calculateFreq(float *ref_points_host_val, unsigned int *freq_dev) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = ref_points_host_val[i];
atomicAdd(&freq_dev[j], 1);
}
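// Builds a two-bin histogram of the 0/1 labels with atomicAdd, so the final majority
// vote reduces to comparing freq_dev[0] and freq_dev[1] on the host.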
//Function called to run our knn algorithm using CUDA kernels.
int classifyAPointCUDA(Point arr[], float val[], int n, int k, Point p)
{
unsigned int numBytes;
unsigned int nBlocks, nThreads;
int chunkSize_sort;
unsigned int nBytes_sort;
unsigned int nBlocks_sort, nThreads_sort;
float TiempoKernelDistance, TiempoSort, TiempoKernelFreq, TiempoAllOperations, TiempoProva;
hipEvent_t E0, E1, E2, E3, E4, E5, E6, E7;
hipEventCreate(&E0);
hipEventCreate(&E1);
hipEventCreate(&E2);
hipEventCreate(&E3);
hipEventCreate(&E4);
hipEventCreate(&E5);
hipEventCreate(&E6);
hipEventCreate(&E7);
hipEventRecord(E6, 0);
float *ref_points_dev_x = NULL;
float *ref_points_dev_y = NULL;
float *ref_points_dev_val = NULL;
float *result_prediction_dev = NULL;
float *ref_points_host_x = NULL;
float *ref_points_host_y = NULL;
float *ref_points_host_val = NULL;
float *result_prediction_host = NULL;
float *arrSorted_h, *arrSortedF_h;
float *arrSorted_d, *arrSortedF_d;
float *arrSorted2_h, *arrSortedF2_h;
float *arrSorted2_d, *arrSortedF2_d;
unsigned int *freq_dev = NULL;
unsigned int *freq_host = NULL;
// number of threads
nThreads = THREADS;
// number of blocks in each dimension
nBlocks = (n+nThreads-1)/nThreads;
printf("nBlocks = %d \n", nBlocks);
numBytes = nBlocks * nThreads * sizeof(float);
printf("numBytes = %d \n", numBytes);
nThreads_sort = 128;
nBlocks_sort = 2;
chunkSize_sort = n/(nThreads_sort*nBlocks_sort);
nBytes_sort = n * sizeof(float);
if (PINNED) {
// Allocate pinned memory on the host
hipHostMalloc((float**)&ref_points_host_x, numBytes);
hipHostMalloc((float**)&ref_points_host_y, numBytes);
hipHostMalloc((float**)&ref_points_host_val, nBytes_sort);
hipHostMalloc((float**)&result_prediction_host, nBytes_sort);
hipHostMalloc((float**)&arrSorted_h, nBytes_sort);
hipHostMalloc((float**)&arrSortedF_h, nBytes_sort);
hipHostMalloc((float**)&arrSorted2_h, nBytes_sort);
hipHostMalloc((float**)&arrSortedF2_h, nBytes_sort);
hipHostMalloc((unsigned int**)&freq_host, sizeof(unsigned int)*2);
} else {
// Allocate memory on the host
ref_points_host_x = (float*) malloc(numBytes);
ref_points_host_y = (float*) malloc(numBytes);
ref_points_host_val = (float*) malloc(nBytes_sort);
result_prediction_host = (float*) malloc(nBytes_sort);
arrSorted_h = (float*) malloc(nBytes_sort);
arrSortedF_h = (float*) malloc(nBytes_sort);
arrSorted2_h = (float*) malloc(nBytes_sort);
arrSortedF2_h = (float*) malloc(nBytes_sort);
freq_host = (unsigned int*) malloc(sizeof(unsigned int)*2);
}
InitHostInput(arr, n, val, p, ref_points_host_x, ref_points_host_y, ref_points_host_val);
InitHostFreq(freq_host);
// Allocate memory on the device
hipMalloc((float**)&ref_points_dev_x, numBytes);
hipMalloc((float**)&ref_points_dev_y, numBytes);
hipMalloc((float**)&ref_points_dev_val, nBytes_sort);
hipMalloc((float**)&result_prediction_dev, nBytes_sort);
hipMalloc((float **) &arrSorted_d, nBytes_sort);
hipMalloc((float **) &arrSortedF_d, nBytes_sort);
hipMalloc((float **) &arrSorted2_d, nBytes_sort);
hipMalloc((float **) &arrSortedF2_d, nBytes_sort);
hipMalloc((unsigned int**)&freq_dev, sizeof(unsigned int)*2);
// Copy data from the host to the device
hipMemcpy(ref_points_dev_x, ref_points_host_x, numBytes, hipMemcpyHostToDevice);
hipMemcpy(ref_points_dev_y, ref_points_host_y, numBytes, hipMemcpyHostToDevice);
hipMemcpy(ref_points_dev_val, ref_points_host_val, nBytes_sort, hipMemcpyHostToDevice);
hipMemcpy(freq_dev, freq_host, sizeof(unsigned int)*2, hipMemcpyHostToDevice);
hipEventRecord(E0, 0);
// Launch the kernel
hipLaunchKernelGGL(( calculateDistance), dim3(nBlocks), dim3(nThreads), 0, 0, p, ref_points_dev_x, ref_points_dev_y, result_prediction_dev);
hipEventRecord(E1, 0); hipEventSynchronize(E1);
hipEventElapsedTime(&TiempoKernelDistance, E0, E1);
// Copy the result back to the host
//hipMemcpy(result_prediction_host, result_prediction_dev, numBytes, hipMemcpyDeviceToHost);
// Free device memory
hipFree(ref_points_dev_x);
hipFree(ref_points_dev_y);
hipEventRecord(E4, 0);
// Sort the Points by distance from p
printf("Sort kernel launch <<<nBlocks, nThreads>>> (N): <<<%d, %d>>> (%d)\n", nBlocks_sort, nThreads_sort, n);
hipLaunchKernelGGL(( callMergeSort), dim3(nBlocks_sort), dim3(nThreads_sort), 0, 0, result_prediction_dev, arrSorted_d, ref_points_dev_val, arrSorted2_d, chunkSize_sort, n);
int auxChunkSize = chunkSize_sort*2;
int auxBlock = nBlocks_sort;
int auxThread = nThreads_sort/2;
hipFree(result_prediction_dev);
hipFree(ref_points_dev_val);
while (auxChunkSize < n) {
//printf("Merge kernel launch <<<nBlocks, nThreads>>> (N): <<<%d, %d>>> (%d)\n", auxBlock, auxThread, n);
hipLaunchKernelGGL(( callMerge), dim3(auxBlock), dim3(auxThread), 0, 0, arrSorted_d, arrSortedF_d, arrSorted2_d, arrSortedF2_d, auxChunkSize, n);
auxChunkSize = auxChunkSize*2;
//auxThread = auxThread/2;
}
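// After this loop the distances (and their labels) form two sorted halves of length n/2:
// callMergeSort sorted chunkSize_sort-element chunks, and each callMerge pass merged pairs
// of neighbouring chunks while auxChunkSize doubled. The final merge of the two halves is
// done on the host by sortBlocks below.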
hipMemcpy(arrSorted_h, arrSortedF_d, nBytes_sort, hipMemcpyDeviceToHost);
hipMemcpy(arrSorted2_h, arrSortedF2_d, nBytes_sort, hipMemcpyDeviceToHost);
hipFree(arrSorted_d);
hipFree(arrSortedF_d);
sortBlocks(arrSorted_h, arrSortedF_h, arrSorted2_h, arrSortedF2_h, n);
//quickSort(result_prediction_host, ref_points_host_val, 0, n-1);
/*
for(int i = 0; i < n; i++){
printf("Element: %d\n", i);
printf("Distance: %f\n", result_prediction_host[i]);
printf("Label: %f\n", ref_points_host_val[i]);
}
*/
hipEventRecord(E5, 0); hipEventSynchronize(E5);
hipEventElapsedTime(&TiempoSort, E4, E5);
hipMemcpy(arrSortedF2_d, arrSortedF2_h, nBytes_sort, hipMemcpyHostToDevice);
hipEventRecord(E2, 0);
// Launch the kernel
hipLaunchKernelGGL(( calculateFreq), dim3(k), dim3(1), 0, 0, arrSortedF2_d, freq_dev);
hipEventRecord(E3, 0); hipEventSynchronize(E3);
hipEventElapsedTime(&TiempoKernelFreq, E2, E3);
TiempoAllOperations = TiempoKernelDistance + TiempoSort + TiempoKernelFreq;
hipMemcpy(freq_host, freq_dev, sizeof(unsigned int)*2, hipMemcpyDeviceToHost);
hipFree(arrSorted2_d); // ref_points_dev_val was already freed right after the merge-sort launch
hipFree(arrSortedF2_d);
hipFree(freq_dev);
int result = -1;
if(freq_host[0] > freq_host[1]) result = 0;
else result = 1;
printf ("freq1 is %d.\n", freq_host[0]);
printf ("freq2 is %d.\n", freq_host[1]);
printf ("The value classified to unknown point"
" is %d.\n", result);
printf("Kernel launch <<<nBlocks, nThreads>>> (N): <<<%d, %d>>> (%d)\n", nBlocks, nThreads, n);
printf("Distance kernel time (00): %4.6f ms\n", TiempoKernelDistance);
printf("Frequency kernel time (00): %4.6f ms\n", TiempoKernelFreq);
printf("Sort time (00): %4.6f ms\n", TiempoSort);
printf("Time for all operations (00): %4.6f ms\n", TiempoAllOperations);
if (PINNED) printf("Using pinned memory\n");
else printf("Not using pinned memory\n");
if (PINNED) {
hipHostFree(ref_points_host_x); hipHostFree(ref_points_host_y); hipHostFree(ref_points_host_val);
hipHostFree(result_prediction_host); hipHostFree(freq_host); hipHostFree(arrSorted_h);hipHostFree(arrSortedF_h); hipHostFree(arrSorted2_h);hipHostFree(arrSortedF2_h);
} else {
free(ref_points_host_x); free(ref_points_host_y); free(ref_points_host_val); free(result_prediction_host);
free(arrSorted_h); free(arrSortedF_h); free(freq_host); free(arrSorted2_h); free(arrSortedF2_h);
}
hipEventRecord(E7, 0); hipEventSynchronize(E7);
hipEventElapsedTime(&TiempoProva, E6, E7);
printf("Total CUDA time: %4.6f ms\n", TiempoProva);
return result;
}
//Default initialization of k (the number of nearest points taken into account).
void InitKDefecte(int *k) {
// Parameter to decide group of the testing point
(*k) = 15;
}
//Default initialization of the point p (the point whose value we want to predict).
void InitTestPointDefecte(struct Point *p) {
//Test Point
p->x = 2.5;
p->y = 7;
}
//Default initialization of both k and p.
void InitDefecte(int *k, struct Point *p) {
InitKDefecte(k);
InitTestPointDefecte(p);
}
//Main function of our program
int main(int argc, char** argv)
{
srand(time(0));
//Declare the variables
int n, k;
struct Point p;
//Initialize K and the coordinates of the testing point
if (argc == 1) { InitDefecte(&k, &p); }
else if (argc == 2) { k = atoi(argv[1]); InitTestPointDefecte(&p); }
else if (argc == 4) { k = atoi(argv[1]); p.x = atof(argv[2]); p.y = atof(argv[3]);}
else { printf("Usage: ./exe k TestPointCoordenadaX TestPointCoordenadaY\n"); exit(0); }
//Build the structure on which the prediction will be made
n = 131072; // Number of data points
Point arr[n];
float val_seq[n];
float val_cuda[n];
for(int i = 0; i < n; ++i) {
arr[i].x = rand();
arr[i].y = rand();
val_seq[i] = rand() % 2;
val_cuda[i] = val_seq[i];
}
/*for(int i = 0; i < n; i++){
printf("x: %lf\n", arr[i].x);
printf("y: %lf\n", arr[i].y);
printf("val: %f\n", val[i]);
}*/
printf("k = %d \n", k);
printf("The Testing Point values are:");
printf(" x = %f", p.x);
printf(" and");
printf(" y = %f", p.y);
printf("\n");
printf("\n");
printf("Sequential program -------------------------------------------------- \n");
printf("\n");
// Calculate the time taken by the sequential code: classifyAPoint function
clock_t t;
t = clock();
int result = classifyAPoint(arr, n, k, p, val_seq);
t = clock() - t;
float time_taken = ((float)t)/(CLOCKS_PER_SEC/1000); // in mseconds
printf ("The value classified to unknown point"
" is %d.\n", result);
printf ("Total sequential time:"
" %lf ms.\n", time_taken);
printf("\n");
printf("CUDA program -------------------------------------------------------- \n");
printf("\n");
int result2 = classifyAPointCUDA(arr,val_cuda, n, k, p);
printf ("The value classified to unknown point"
" is %d.\n", result2);
}
| 4d0ccc5e419d41d96a872f33c2c96719d18b4947.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream> // std::cout
#include <algorithm> // std::sort
#include <vector> // std::vector
#include <time.h>
using namespace std;
// Parameter that selects whether pinned memory is used (1) or not (0)
#define PINNED 0
// Parameter that sets the number of threads for the calculateDistance kernel launch
#define THREADS 1024
//------------------------------------------------------------------------------------------------------------------------------------------
//FUNCTIONS TAKEN FROM OUR COLLEAGUES' CODE TO PERFORM THE MERGESORT WITH CUDA
__device__ void mergeDevice(float *list, float *sorted, float *list2, float *sorted2, int start, int mid, int end)
{
int ti=start, i=start, j=mid;
while (i<mid || j<end)
{
if (j==end) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else if (i==mid) {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
else if (list[i]<list[j]) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
ti++;
}
for (ti=start; ti<end; ti++) {
list[ti] = sorted[ti];
list2[ti] = sorted2[ti];
}
}
void mergeHost(float *list, float *sorted, float *list2, float *sorted2, int start, int mid, int end)
{
int ti=start, i=start, j=mid;
while (i<mid || j<end)
{
if (j==end) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else if (i==mid) {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
else if (list[i]<list[j]) {
sorted[ti] = list[i];
sorted2[ti] = list2[i];
i++;
}
else {
sorted[ti] = list[j];
sorted2[ti] = list2[j];
j++;
}
ti++;
}
for (ti=start; ti<end; ti++) {
list[ti] = sorted[ti];
list2[ti] = sorted2[ti];
}
}
__device__ void mergeSortKernel(float *list, float *sorted, float *list2, float *sorted2, int start, int end)
{
//Termination case 1: there are more threads than elements in the vector
if (end-start<2)
return;
mergeSortKernel(list, sorted, list2, sorted2, start, start + (end-start)/2);
mergeSortKernel(list, sorted, list2, sorted2, start + (end-start)/2, end);
mergeDevice(list, sorted, list2, sorted2, start, start + (end-start)/2, end);
}
__global__ void callMerge(float *list, float *sorted, float *list2, float *sorted2, int chunkSize, int N) {
if (chunkSize >= N)
return;
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int start = tid*chunkSize;
int end = start + chunkSize;
if (end > N) {
end = N;
}
mergeDevice(list, sorted, list2, sorted2, start, start + (end-start)/2, end);
}
__global__ void callMergeSort(float *list, float *sorted, float *list2, float *sorted2, int chunkSize, int N) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int start = tid*chunkSize;
int end = start + chunkSize;
if (end > N) {
end = N;
}
mergeSortKernel(list, sorted, list2, sorted2, start, end);
}
void sortBlocks(float *list, float *sorted, float *list2, float *sorted2, int N) {
int chunkSize = N;
int start = 0;
int end = chunkSize;
int mid = (start+end)/2;
mergeHost(list, sorted, list2, sorted2, start, mid, end);
}
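// Overall sort pipeline used by classifyAPointCUDA below: callMergeSort sorts fixed-size
// chunks in parallel (one chunk per thread), repeated callMerge launches merge neighbouring
// sorted chunks while the chunk size doubles, and sortBlocks performs the last host-side
// merge of the two remaining halves. The second array (list2/sorted2) is permuted in
// lockstep so every distance keeps its 0/1 label.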
//------------------------------------------------------------------------------------------------------------------------------------------
//FUNCTIONS USED IN THE SEQUENTIAL VERSION TO SORT WITH QUICKSORT
// A utility function that swaps one element in each of the two parallel arrays (distance and label)
void swap(float* a, float* b, float* c, float* d)
{
float t = *a; // float temporaries: the arrays hold float distances and labels,
float t2 = *c; // so int temporaries would truncate the values being swapped
*a = *b;
*c = *d;
*b = t;
*d = t2;
}
/* This function takes last element as pivot, places
the pivot element at its correct position in sorted
array, and places all smaller (smaller than pivot)
to left of pivot and all greater elements to right
of pivot */
int partition (float *result_prediction_host, float *ref_points_host_val, int low, int high)
{
float pivot = result_prediction_host[high]; // pivot (float: truncating to int would mis-partition the distances)
int i = (low - 1); // Index of smaller element
for (int j = low; j <= high- 1; j++)
{
// If current element is smaller than or
// equal to pivot
if (result_prediction_host[j] <= pivot)
{
i++; // increment index of smaller element
swap(&result_prediction_host[i], &result_prediction_host[j], &ref_points_host_val[i], &ref_points_host_val[j]);
}
}
swap(&result_prediction_host[i + 1], &result_prediction_host[high], &ref_points_host_val[i + 1], &ref_points_host_val[high]);
return (i + 1);
}
/* The main function that implements QuickSort
arr[] --> Array to be sorted,
low --> Starting index,
high --> Ending index */
void quickSort(float *result_prediction_host, float *ref_points_host_val, int low, int high)
{
if (low < high)
{
/* pi is partitioning index, arr[p] is now
at right place */
int pi = partition(result_prediction_host, ref_points_host_val, low, high);
// Separately sort elements before
// partition and after partition
quickSort(result_prediction_host, ref_points_host_val, low, pi - 1);
quickSort(result_prediction_host, ref_points_host_val, pi + 1, high);
}
}
//------------------------------------------------------------------------------------------------------------------------------------------
//Structure used to represent a coordinate point
struct Point
{
float x, y; // Co-ordinate of point
};
/** Function called to run our knn algorithm sequentially.
* @param arr reference points
* @param n number of reference points
* @param k number of points we want to use for the prediction
* @param p point we want to predict
* @param val group label (0 or 1) of each reference point
*/
int classifyAPoint(Point arr[], int n, int k, Point p, float val[])
{
float distances[n];
// Fill distances of all points from p
for (int i = 0; i < n; i++)
distances[i] =
sqrt((arr[i].x - p.x) * (arr[i].x - p.x) +
(arr[i].y - p.y) * (arr[i].y - p.y));
// Sort the Points by distance from p
quickSort(distances, val, 0, n-1);
// Now consider the first k elements and only two groups.
int freq1 = 0; // Frequency of group 0
int freq2 = 0; // Frequency of group 1
for (int i = 0; i < k; i++)
{
if (val[i] == 0)
freq1++;
else if (val[i] == 1)
freq2++;
}
printf ("freq1 is %d.\n", freq1);
printf ("freq2 is %d.\n", freq2);
return (freq1 > freq2 ? 0 : 1);
}
//Function that lays out and initializes the host vectors needed for the device computations (coordinates and 0/1 value).
void InitHostInput(Point arr[], int n, float val[], Point p, float *ref_points_host_x, float *ref_points_host_y, float *ref_points_host_val) {
for (int i=0; i<n; i++) {
ref_points_host_x[i] = arr[i].x;
ref_points_host_y[i] = arr[i].y;
ref_points_host_val[i] = val[i];
}
}
//Function that initializes the host frequency counters to 0.
void InitHostFreq(unsigned int *freq_host) {
freq_host[0] = 0;
freq_host[1] = 0;
}
//Kernel that computes the Euclidean distance between a given point (p) and all reference points.
__global__ void calculateDistance(Point p, float *ref_points_dev_x, float *ref_points_dev_y, float *result_prediction_dev) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Fill distances of all points from p
result_prediction_dev[i] =
sqrt((ref_points_dev_x[i] - p.x) * (ref_points_dev_x[i] - p.x) +
(ref_points_dev_y[i] - p.y) * (ref_points_dev_y[i] - p.y));
}
//Kernel that counts the frequencies of the values 0 and 1.
__global__ void calculateFreq(float *ref_points_host_val, unsigned int *freq_dev) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = ref_points_host_val[i];
atomicAdd(&freq_dev[j], 1);
}
//Function called to run our knn algorithm using CUDA kernels.
int classifyAPointCUDA(Point arr[], float val[], int n, int k, Point p)
{
unsigned int numBytes;
unsigned int nBlocks, nThreads;
int chunkSize_sort;
unsigned int nBytes_sort;
unsigned int nBlocks_sort, nThreads_sort;
float TiempoKernelDistance, TiempoSort, TiempoKernelFreq, TiempoAllOperations, TiempoProva;
cudaEvent_t E0, E1, E2, E3, E4, E5, E6, E7;
cudaEventCreate(&E0);
cudaEventCreate(&E1);
cudaEventCreate(&E2);
cudaEventCreate(&E3);
cudaEventCreate(&E4);
cudaEventCreate(&E5);
cudaEventCreate(&E6);
cudaEventCreate(&E7);
cudaEventRecord(E6, 0);
float *ref_points_dev_x = NULL;
float *ref_points_dev_y = NULL;
float *ref_points_dev_val = NULL;
float *result_prediction_dev = NULL;
float *ref_points_host_x = NULL;
float *ref_points_host_y = NULL;
float *ref_points_host_val = NULL;
float *result_prediction_host = NULL;
float *arrSorted_h, *arrSortedF_h;
float *arrSorted_d, *arrSortedF_d;
float *arrSorted2_h, *arrSortedF2_h;
float *arrSorted2_d, *arrSortedF2_d;
unsigned int *freq_dev = NULL;
unsigned int *freq_host = NULL;
// number of threads
nThreads = THREADS;
// number of blocks in each dimension
nBlocks = (n+nThreads-1)/nThreads;
printf("nBlocks = %d \n", nBlocks);
numBytes = nBlocks * nThreads * sizeof(float);
printf("numBytes = %d \n", numBytes);
nThreads_sort = 128;
nBlocks_sort = 2;
chunkSize_sort = n/(nThreads_sort*nBlocks_sort);
nBytes_sort = n * sizeof(float);
if (PINNED) {
// Allocate pinned memory on the host
cudaMallocHost((float**)&ref_points_host_x, numBytes);
cudaMallocHost((float**)&ref_points_host_y, numBytes);
cudaMallocHost((float**)&ref_points_host_val, nBytes_sort);
cudaMallocHost((float**)&result_prediction_host, nBytes_sort);
cudaMallocHost((float**)&arrSorted_h, nBytes_sort);
cudaMallocHost((float**)&arrSortedF_h, nBytes_sort);
cudaMallocHost((float**)&arrSorted2_h, nBytes_sort);
cudaMallocHost((float**)&arrSortedF2_h, nBytes_sort);
cudaMallocHost((unsigned int**)&freq_host, sizeof(unsigned int)*2);
} else {
// Allocate memory on the host
ref_points_host_x = (float*) malloc(numBytes);
ref_points_host_y = (float*) malloc(numBytes);
ref_points_host_val = (float*) malloc(nBytes_sort);
result_prediction_host = (float*) malloc(nBytes_sort);
arrSorted_h = (float*) malloc(nBytes_sort);
arrSortedF_h = (float*) malloc(nBytes_sort);
arrSorted2_h = (float*) malloc(nBytes_sort);
arrSortedF2_h = (float*) malloc(nBytes_sort);
freq_host = (unsigned int*) malloc(sizeof(unsigned int)*2);
}
InitHostInput(arr, n, val, p, ref_points_host_x, ref_points_host_y, ref_points_host_val);
InitHostFreq(freq_host);
// Allocate memory on the device
cudaMalloc((float**)&ref_points_dev_x, numBytes);
cudaMalloc((float**)&ref_points_dev_y, numBytes);
cudaMalloc((float**)&ref_points_dev_val, nBytes_sort);
cudaMalloc((float**)&result_prediction_dev, nBytes_sort);
cudaMalloc((float **) &arrSorted_d, nBytes_sort);
cudaMalloc((float **) &arrSortedF_d, nBytes_sort);
cudaMalloc((float **) &arrSorted2_d, nBytes_sort);
cudaMalloc((float **) &arrSortedF2_d, nBytes_sort);
cudaMalloc((unsigned int**)&freq_dev, sizeof(unsigned int)*2);
// Copy data from the host to the device
cudaMemcpy(ref_points_dev_x, ref_points_host_x, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(ref_points_dev_y, ref_points_host_y, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(ref_points_dev_val, ref_points_host_val, nBytes_sort, cudaMemcpyHostToDevice);
cudaMemcpy(freq_dev, freq_host, sizeof(unsigned int)*2, cudaMemcpyHostToDevice);
cudaEventRecord(E0, 0);
// Launch the kernel
calculateDistance<<<nBlocks, nThreads>>>(p, ref_points_dev_x, ref_points_dev_y, result_prediction_dev);
cudaEventRecord(E1, 0); cudaEventSynchronize(E1);
cudaEventElapsedTime(&TiempoKernelDistance, E0, E1);
// Copy the result back to the host
//cudaMemcpy(result_prediction_host, result_prediction_dev, numBytes, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(ref_points_dev_x);
cudaFree(ref_points_dev_y);
cudaEventRecord(E4, 0);
// Sort the Points by distance from p
printf("Sort kernel launch <<<nBlocks, nThreads>>> (N): <<<%d, %d>>> (%d)\n", nBlocks_sort, nThreads_sort, n);
callMergeSort<<<nBlocks_sort, nThreads_sort>>>(result_prediction_dev, arrSorted_d, ref_points_dev_val, arrSorted2_d, chunkSize_sort, n);
int auxChunkSize = chunkSize_sort*2;
int auxBlock = nBlocks_sort;
int auxThread = nThreads_sort/2;
cudaFree(result_prediction_dev);
cudaFree(ref_points_dev_val);
while (auxChunkSize < n) {
//printf("Merge kernel launch <<<nBlocks, nThreads>>> (N): <<<%d, %d>>> (%d)\n", auxBlock, auxThread, n);
callMerge<<<auxBlock, auxThread>>>(arrSorted_d, arrSortedF_d, arrSorted2_d, arrSortedF2_d, auxChunkSize, n);
auxChunkSize = auxChunkSize*2;
//auxThread = auxThread/2;
}
cudaMemcpy(arrSorted_h, arrSortedF_d, nBytes_sort, cudaMemcpyDeviceToHost);
cudaMemcpy(arrSorted2_h, arrSortedF2_d, nBytes_sort, cudaMemcpyDeviceToHost);
cudaFree(arrSorted_d);
cudaFree(arrSortedF_d);
sortBlocks(arrSorted_h, arrSortedF_h, arrSorted2_h, arrSortedF2_h, n);
//quickSort(result_prediction_host, ref_points_host_val, 0, n-1);
/*
for(int i = 0; i < n; i++){
printf("Element: %d\n", i);
printf("Distance: %f\n", result_prediction_host[i]);
printf("Label: %f\n", ref_points_host_val[i]);
}
*/
cudaEventRecord(E5, 0); cudaEventSynchronize(E5);
cudaEventElapsedTime(&TiempoSort, E4, E5);
cudaMemcpy(arrSortedF2_d, arrSortedF2_h, nBytes_sort, cudaMemcpyHostToDevice);
cudaEventRecord(E2, 0);
// Launch the kernel
calculateFreq<<<k, 1>>>(arrSortedF2_d, freq_dev);
cudaEventRecord(E3, 0); cudaEventSynchronize(E3);
cudaEventElapsedTime(&TiempoKernelFreq, E2, E3);
TiempoAllOperations = TiempoKernelDistance + TiempoSort + TiempoKernelFreq;
cudaMemcpy(freq_host, freq_dev, sizeof(unsigned int)*2, cudaMemcpyDeviceToHost);
cudaFree(arrSorted2_d); // ref_points_dev_val was already freed right after the merge-sort launch
cudaFree(arrSortedF2_d);
cudaFree(freq_dev);
int result = -1;
if(freq_host[0] > freq_host[1]) result = 0;
else result = 1;
printf ("freq1 is %d.\n", freq_host[0]);
printf ("freq2 is %d.\n", freq_host[1]);
printf ("The value classified to unknown point"
" is %d.\n", result);
printf("Kernel launch <<<nBlocks, nThreads>>> (N): <<<%d, %d>>> (%d)\n", nBlocks, nThreads, n);
printf("Distance kernel time (00): %4.6f ms\n", TiempoKernelDistance);
printf("Frequency kernel time (00): %4.6f ms\n", TiempoKernelFreq);
printf("Sort time (00): %4.6f ms\n", TiempoSort);
printf("Time for all operations (00): %4.6f ms\n", TiempoAllOperations);
if (PINNED) printf("Using pinned memory\n");
else printf("Not using pinned memory\n");
if (PINNED) {
cudaFreeHost(ref_points_host_x); cudaFreeHost(ref_points_host_y); cudaFreeHost(ref_points_host_val);
cudaFreeHost(result_prediction_host); cudaFreeHost(freq_host); cudaFreeHost(arrSorted_h);cudaFreeHost(arrSortedF_h); cudaFreeHost(arrSorted2_h);cudaFreeHost(arrSortedF2_h);
} else {
free(ref_points_host_x); free(ref_points_host_y); free(ref_points_host_val); free(result_prediction_host);
free(arrSorted_h); free(arrSortedF_h); free(freq_host); free(arrSorted2_h); free(arrSortedF2_h);
}
cudaEventRecord(E7, 0); cudaEventSynchronize(E7);
cudaEventElapsedTime(&TiempoProva, E6, E7);
printf("Total CUDA time: %4.6f ms\n", TiempoProva);
return result;
}
//Default initialization of k (the number of nearest points taken into account).
void InitKDefecte(int *k) {
// Parameter to decide group of the testing point
(*k) = 15;
}
//Default initialization of the point p (the point whose value we want to predict).
void InitTestPointDefecte(struct Point *p) {
//Test Point
p->x = 2.5;
p->y = 7;
}
//Default initialization of both k and p.
void InitDefecte(int *k, struct Point *p) {
InitKDefecte(k);
InitTestPointDefecte(p);
}
//Main function of our program
int main(int argc, char** argv)
{
srand(time(0));
//Declare the variables
int n, k;
struct Point p;
//Initialize K and the coordinates of the testing point
if (argc == 1) { InitDefecte(&k, &p); }
else if (argc == 2) { k = atoi(argv[1]); InitTestPointDefecte(&p); }
else if (argc == 4) { k = atoi(argv[1]); p.x = atof(argv[2]); p.y = atof(argv[3]);}
else { printf("Usage: ./exe k TestPointCoordenadaX TestPointCoordenadaY\n"); exit(0); }
//Build the structure on which the prediction will be made
n = 131072; // Number of data points
Point arr[n];
float val_seq[n];
float val_cuda[n];
for(int i = 0; i < n; ++i) {
arr[i].x = rand();
arr[i].y = rand();
val_seq[i] = rand() % 2;
val_cuda[i] = val_seq[i];
}
/*for(int i = 0; i < n; i++){
printf("x: %lf\n", arr[i].x);
printf("y: %lf\n", arr[i].y);
printf("val: %f\n", val[i]);
}*/
printf("k = %d \n", k);
printf("The Testing Point values are:");
printf(" x = %f", p.x);
printf(" and");
printf(" y = %f", p.y);
printf("\n");
printf("\n");
printf("Sequential program -------------------------------------------------- \n");
printf("\n");
// Calculate the time taken by the sequential code: classifyAPoint function
clock_t t;
t = clock();
int result = classifyAPoint(arr, n, k, p, val_seq);
t = clock() - t;
float time_taken = ((float)t)/(CLOCKS_PER_SEC/1000); // in mseconds
printf ("The value classified to unknown point"
" is %d.\n", result);
printf ("Total sequential time:"
" %lf ms.\n", time_taken);
printf("\n");
printf("CUDA program -------------------------------------------------------- \n");
printf("\n");
int result2 = classifyAPointCUDA(arr,val_cuda, n, k, p);
printf ("The value classified to unknown point"
" is %d.\n", result2);
}
|
1b0aaad4e0c561f22d5518e423fec596aa5bb0e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "binarize_filters_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *filters = NULL;
hipMalloc(&filters, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int size = XSIZE*YSIZE;
float *binary = NULL;
hipMalloc(&binary, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
binarize_filters_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, filters,n,size,binary);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
binarize_filters_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, filters,n,size,binary);
}
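// The 10 launches above are a warm-up; the loop below times 1000 back-to-back launches and
// prints the accumulated time in microseconds together with the block and matrix shapes.
// Note that no device synchronisation is issued before the stop timestamp, so the kernels
// are only guaranteed to have been enqueued, not completed, within the measured window.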
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
binarize_filters_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, filters,n,size,binary);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1b0aaad4e0c561f22d5518e423fec596aa5bb0e7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "binarize_filters_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *filters = NULL;
cudaMalloc(&filters, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int size = XSIZE*YSIZE;
float *binary = NULL;
cudaMalloc(&binary, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
binarize_filters_kernel<<<gridBlock,threadBlock>>>(filters,n,size,binary);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
binarize_filters_kernel<<<gridBlock,threadBlock>>>(filters,n,size,binary);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
binarize_filters_kernel<<<gridBlock,threadBlock>>>(filters,n,size,binary);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ba155882ca20707e5e10f89ec5a76288e38c16fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "definition.cuh"
#include "../Util/prefixSumAsm.cu"
namespace scc4k{
extern __shared__ unsigned char SMem[];
namespace FrontierWrite {
enum WriteMode { SIMPLE, SHARED_WARP, SHARED_BLOCK };
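// FrontierReserve_Warp: every lane passes in the number of vertices it found; assuming
// warpExclusiveScan returns the warp-wide total and leaves each lane's exclusive prefix in n
// (which is how it is used here), lane 0 reserves [oldCounter, oldCounter + totalWarp) in the
// global frontier with one atomicAdd and the old counter is broadcast to all lanes via __shfl.
// Hypothetical example: founds = {2, 0, 1, 0, ...} gives n = {0, 2, 2, 3, ...}, totalWarp = 3,
// and with the counter previously at 40 each lane writes its entries starting at 40 + n.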
__device__ __forceinline__ void FrontierReserve_Warp(int* GlobalCounter, int founds, int& n, int &totalWarp, int& globalOffset) {
n = founds;
totalWarp = warpExclusiveScan<32>(n);
int oldCounter;
if (LaneID() == 0) // && totale != 0)
oldCounter = atomicAdd(GlobalCounter, totalWarp);
globalOffset = __shfl(oldCounter, 0);
}
template<int BlockDim>
__device__ __forceinline__ void FrontierReserve_Block(int* GlobalCounter, int founds, int& n, int &totalBlock, int& globalOffset) {
int* SM2 = (int*) SMem;
n = founds;
const int warpId = WarpID();
SM2[warpId] = warpExclusiveScan<32>(n);
__syncthreads();
if (Tid < BlockDim / 32) {
int sum = SM2[Tid];
const int total = warpExclusiveScan<BlockDim / 32>(sum);
if (Tid == 0) {
SM2[32] = total;
SM2[33] = atomicAdd(GlobalCounter, total);
}
SM2[Tid] = sum;
}
__syncthreads();
n += SM2[warpId];
totalBlock = SM2[32];
globalOffset = SM2[33];
}
template<int BlockDim, WriteMode mode>
__device__ __forceinline__ void Write(int* devFrontier, int* GlobalCounter, int* Queue, int founds) {
int n, total, globalOffset;
if (mode == SIMPLE || mode == SHARED_WARP)
FrontierReserve_Warp(GlobalCounter, founds, n, total, globalOffset);
if (mode == SIMPLE) {
const int pos = globalOffset + n;
for (int i = 0; i < founds; i++)
devFrontier[pos + i] = Queue[i];
}
else if (mode == SHARED_WARP) {
int* SM2 = (int*) &SMem[ WarpID() * SMem_Per_Warp ];
int j = 0;
while (total > 0) {
while (j < founds && n + j < IntSMem_Per_Warp) {
SM2[n + j] = Queue[j];
j++;
}
#pragma unroll
for (int i = 0; i < IntSMem_Per_Thread; ++i) {
const int index = LaneID() + i * 32;
if (index < total)
devFrontier[globalOffset + index] = SM2[index];
}
n -= IntSMem_Per_Warp;
total -= IntSMem_Per_Warp;
globalOffset += IntSMem_Per_Warp;
}
}
else if (mode == SHARED_BLOCK) {
FrontierReserve_Block<BlockDim>(GlobalCounter, founds, n, total, globalOffset);
int* SM2 = (int*) SMem;
int j = 0;
while (total > 0) {
__syncthreads();
while (j < founds && n + j < IntSMem_Per_Block(BlockDim) ) {
SM2[n + j] = Queue[j];
j++;
}
__syncthreads();
#pragma unroll
for (int i = 0; i < IntSMem_Per_Thread; ++i) {
const int index = Tid + i * BlockDim;
if (index < total)
devFrontier[globalOffset + index] = SM2[index];
}
n -= IntSMem_Per_Block(BlockDim);
total -= IntSMem_Per_Block(BlockDim);
globalOffset += IntSMem_Per_Block(BlockDim);
}
}
}
}
}
| ba155882ca20707e5e10f89ec5a76288e38c16fa.cu | #pragma once
#include "definition.cuh"
#include "../Util/prefixSumAsm.cu"
namespace scc4k{
extern __shared__ unsigned char SMem[];
namespace FrontierWrite {
enum WriteMode { SIMPLE, SHARED_WARP, SHARED_BLOCK };
__device__ __forceinline__ void FrontierReserve_Warp(int* GlobalCounter, int founds, int& n, int &totalWarp, int& globalOffset) {
n = founds;
totalWarp = warpExclusiveScan<32>(n);
int oldCounter;
if (LaneID() == 0) // && totale != 0)
oldCounter = atomicAdd(GlobalCounter, totalWarp);
globalOffset = __shfl(oldCounter, 0);
}
template<int BlockDim>
__device__ __forceinline__ void FrontierReserve_Block(int* GlobalCounter, int founds, int& n, int &totalBlock, int& globalOffset) {
int* SM2 = (int*) SMem;
n = founds;
const int warpId = WarpID();
SM2[warpId] = warpExclusiveScan<32>(n);
__syncthreads();
if (Tid < BlockDim / 32) {
int sum = SM2[Tid];
const int total = warpExclusiveScan<BlockDim / 32>(sum);
if (Tid == 0) {
SM2[32] = total;
SM2[33] = atomicAdd(GlobalCounter, total);
}
SM2[Tid] = sum;
}
__syncthreads();
n += SM2[warpId];
totalBlock = SM2[32];
globalOffset = SM2[33];
}
template<int BlockDim, WriteMode mode>
__device__ __forceinline__ void Write(int* devFrontier, int* GlobalCounter, int* Queue, int founds) {
int n, total, globalOffset;
if (mode == SIMPLE || mode == SHARED_WARP)
FrontierReserve_Warp(GlobalCounter, founds, n, total, globalOffset);
if (mode == SIMPLE) {
const int pos = globalOffset + n;
for (int i = 0; i < founds; i++)
devFrontier[pos + i] = Queue[i];
}
else if (mode == SHARED_WARP) {
int* SM2 = (int*) &SMem[ WarpID() * SMem_Per_Warp ];
int j = 0;
while (total > 0) {
while (j < founds && n + j < IntSMem_Per_Warp) {
SM2[n + j] = Queue[j];
j++;
}
#pragma unroll
for (int i = 0; i < IntSMem_Per_Thread; ++i) {
const int index = LaneID() + i * 32;
if (index < total)
devFrontier[globalOffset + index] = SM2[index];
}
n -= IntSMem_Per_Warp;
total -= IntSMem_Per_Warp;
globalOffset += IntSMem_Per_Warp;
}
}
else if (mode == SHARED_BLOCK) {
FrontierReserve_Block<BlockDim>(GlobalCounter, founds, n, total, globalOffset);
int* SM2 = (int*) SMem;
int j = 0;
while (total > 0) {
__syncthreads();
while (j < founds && n + j < IntSMem_Per_Block(BlockDim) ) {
SM2[n + j] = Queue[j];
j++;
}
__syncthreads();
#pragma unroll
for (int i = 0; i < IntSMem_Per_Thread; ++i) {
const int index = Tid + i * BlockDim;
if (index < total)
devFrontier[globalOffset + index] = SM2[index];
}
n -= IntSMem_Per_Block(BlockDim);
total -= IntSMem_Per_Block(BlockDim);
globalOffset += IntSMem_Per_Block(BlockDim);
}
}
}
}
}
|
1d2a972351ae2c6006cb5db1ee62d38c2f265360.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/filters.hpp"
#include <cfloat>
#include <opencv2/gpu/device/scan.hpp>
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void resize(const Ptr2D src, float fx, float fy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float xcoo = x * fx;
const float ycoo = y * fy;
dst(y, x) = saturate_cast<T>(src(ycoo, xcoo));
}
}
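// The kernel maps each destination pixel (x, y) to the source-space position (x * fx, y * fy)
// and samples it through the supplied Ptr2D (point/linear/cubic filter plus border handling),
// so fx and fy act as destination-to-source scale factors.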
template <typename Ptr2D, typename T> __global__ void resize_area(const Ptr2D src, float fx, float fy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
dst(y, x) = saturate_cast<T>(src(y, x));
}
}
template <template <typename> class Filter, typename T> struct ResizeDispatcherStream
{
static void call(PtrStepSz<T> src, float fx, float fy, PtrStepSz<T> dst, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filteredSrc(brdSrc, fx, fy);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, stream, filteredSrc, fx, fy, dst);
cudaSafeCall( hipGetLastError() );
}
};
template <typename T> struct ResizeDispatcherStream<AreaFilter, T>
{
static void call(PtrStepSz<T> src, float fx, float fy, PtrStepSz<T> dst, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
AreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
hipLaunchKernelGGL(( resize_area), dim3(grid), dim3(block), 0, stream, filteredSrc, fx, fy, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
};
template <typename T> struct ResizeDispatcherStream<IntegerAreaFilter, T>
{
static void call(PtrStepSz<T> src, float fx, float fy, PtrStepSz<T> dst, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
IntegerAreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
hipLaunchKernelGGL(( resize_area), dim3(grid), dim3(block), 0, stream, filteredSrc, fx, fy, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
};
template <template <typename> class Filter, typename T> struct ResizeDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, fx, fy, dst);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
};
#define OPENCV_GPU_IMPLEMENT_RESIZE_TEX(type) \
texture< type , hipTextureType2D> tex_resize_ ## type (0, hipFilterModePoint, hipAddressModeClamp); \
struct tex_resize_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
const int xoff; \
const int yoff; \
__host__ tex_resize_ ## type ## _reader(int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_resize_ ## type, x + xoff, y + yoff); \
} \
}; \
template <template <typename> class Filter> struct ResizeDispatcherNonStream<Filter, type > \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz< type > dst) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_resize_ ## type, srcWhole); \
tex_resize_ ## type ## _reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter<tex_resize_ ## type ## _reader> filteredSrc(texSrc); \
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, fx, fy, dst); \
} \
else \
{ \
BrdReplicate< type > brd(src.rows, src.cols); \
BorderReader<tex_resize_ ## type ## _reader, BrdReplicate< type > > brdSrc(texSrc, brd); \
Filter< BorderReader<tex_resize_ ## type ## _reader, BrdReplicate< type > > > filteredSrc(brdSrc); \
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, fx, fy, dst); \
} \
cudaSafeCall( hipGetLastError() ); \
cudaSafeCall( hipDeviceSynchronize() ); \
} \
};
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(uchar)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(uchar4)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(schar)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(char4)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(ushort)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(ushort4)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(short)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(short4)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(int)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(int4)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(float)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(float4)
#undef OPENCV_GPU_IMPLEMENT_RESIZE_TEX
template <template <typename> class Filter, typename T> struct ResizeDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst, hipStream_t stream)
{
if (stream == 0)
ResizeDispatcherNonStream<Filter, T>::call(src, srcWhole, xoff, yoff, fx, fy, dst);
else
ResizeDispatcherStream<Filter, T>::call(src, fx, fy, dst, stream);
}
};
template <typename T> struct ResizeDispatcher<AreaFilter, T>
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst, hipStream_t stream)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
int iscale_x = (int)round(fx);
int iscale_y = (int)round(fy);
if( std::abs(fx - iscale_x) < FLT_MIN && std::abs(fy - iscale_y) < FLT_MIN)
ResizeDispatcherStream<IntegerAreaFilter, T>::call(src, fx, fy, dst, stream);
else
ResizeDispatcherStream<AreaFilter, T>::call(src, fx, fy, dst, stream);
}
};
template <typename T> void resize_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy,
PtrStepSzb dst, int interpolation, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst, hipStream_t stream);
static const caller_t callers[4] =
{
ResizeDispatcher<PointFilter, T>::call,
ResizeDispatcher<LinearFilter, T>::call,
ResizeDispatcher<CubicFilter, T>::call,
ResizeDispatcher<AreaFilter, T>::call
};
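// The index matches OpenCV's interpolation flags: 0 = INTER_NEAREST (PointFilter),
// 1 = INTER_LINEAR, 2 = INTER_CUBIC, 3 = INTER_AREA.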
// change to linear if area interpolation is requested for upscaling (fx <= 1 or fy <= 1, i.e. dst larger than src)
if (interpolation == 3 && (fx <= 1.f || fy <= 1.f))
interpolation = 1;
callers[interpolation](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff, fx, fy,
static_cast< PtrStepSz<T> >(dst), stream);
}
template void resize_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
//template void resize_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template void resize_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, hipStream_t stream);
template<typename T> struct scan_traits{};
template<> struct scan_traits<uchar>
{
typedef float scan_line_type;
};
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | 1d2a972351ae2c6006cb5db1ee62d38c2f265360.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/filters.hpp"
#include <cfloat>
#include <opencv2/gpu/device/scan.hpp>
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void resize(const Ptr2D src, float fx, float fy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float xcoo = x * fx;
const float ycoo = y * fy;
dst(y, x) = saturate_cast<T>(src(ycoo, xcoo));
}
}
template <typename Ptr2D, typename T> __global__ void resize_area(const Ptr2D src, float fx, float fy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
dst(y, x) = saturate_cast<T>(src(y, x));
}
}
template <template <typename> class Filter, typename T> struct ResizeDispatcherStream
{
static void call(PtrStepSz<T> src, float fx, float fy, PtrStepSz<T> dst, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filteredSrc(brdSrc, fx, fy);
resize<<<grid, block, 0, stream>>>(filteredSrc, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
}
};
template <typename T> struct ResizeDispatcherStream<AreaFilter, T>
{
static void call(PtrStepSz<T> src, float fx, float fy, PtrStepSz<T> dst, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
AreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
resize_area<<<grid, block, 0, stream>>>(filteredSrc, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
template <typename T> struct ResizeDispatcherStream<IntegerAreaFilter, T>
{
static void call(PtrStepSz<T> src, float fx, float fy, PtrStepSz<T> dst, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
IntegerAreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
resize_area<<<grid, block, 0, stream>>>(filteredSrc, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
template <template <typename> class Filter, typename T> struct ResizeDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
resize<<<grid, block>>>(filteredSrc, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
#define OPENCV_GPU_IMPLEMENT_RESIZE_TEX(type) \
texture< type , cudaTextureType2D> tex_resize_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_resize_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
const int xoff; \
const int yoff; \
__host__ tex_resize_ ## type ## _reader(int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_resize_ ## type, x + xoff, y + yoff); \
} \
}; \
template <template <typename> class Filter> struct ResizeDispatcherNonStream<Filter, type > \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz< type > dst) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_resize_ ## type, srcWhole); \
tex_resize_ ## type ## _reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter<tex_resize_ ## type ## _reader> filteredSrc(texSrc); \
resize<<<grid, block>>>(filteredSrc, fx, fy, dst); \
} \
else \
{ \
BrdReplicate< type > brd(src.rows, src.cols); \
BorderReader<tex_resize_ ## type ## _reader, BrdReplicate< type > > brdSrc(texSrc, brd); \
Filter< BorderReader<tex_resize_ ## type ## _reader, BrdReplicate< type > > > filteredSrc(brdSrc); \
resize<<<grid, block>>>(filteredSrc, fx, fy, dst); \
} \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
};
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(uchar)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(uchar4)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(schar)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(char4)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(ushort)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(ushort4)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(short)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(short4)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(int)
//OPENCV_GPU_IMPLEMENT_RESIZE_TEX(int4)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(float)
OPENCV_GPU_IMPLEMENT_RESIZE_TEX(float4)
#undef OPENCV_GPU_IMPLEMENT_RESIZE_TEX
template <template <typename> class Filter, typename T> struct ResizeDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst, cudaStream_t stream)
{
if (stream == 0)
ResizeDispatcherNonStream<Filter, T>::call(src, srcWhole, xoff, yoff, fx, fy, dst);
else
ResizeDispatcherStream<Filter, T>::call(src, fx, fy, dst, stream);
}
};
template <typename T> struct ResizeDispatcher<AreaFilter, T>
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst, cudaStream_t stream)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
int iscale_x = (int)round(fx);
int iscale_y = (int)round(fy);
if( std::abs(fx - iscale_x) < FLT_MIN && std::abs(fy - iscale_y) < FLT_MIN)
ResizeDispatcherStream<IntegerAreaFilter, T>::call(src, fx, fy, dst, stream);
else
ResizeDispatcherStream<AreaFilter, T>::call(src, fx, fy, dst, stream);
}
};
template <typename T> void resize_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy,
PtrStepSzb dst, int interpolation, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSz<T> dst, cudaStream_t stream);
static const caller_t callers[4] =
{
ResizeDispatcher<PointFilter, T>::call,
ResizeDispatcher<LinearFilter, T>::call,
ResizeDispatcher<CubicFilter, T>::call,
ResizeDispatcher<AreaFilter, T>::call
};
// change to linear if area interpolation upscaling
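// (fx and fy are src/dst scale ratios that map destination coordinates back to
// the source, so a value <= 1 on an axis means that axis is being enlarged and
// the code falls back to the linear filter)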
if (interpolation == 3 && (fx <= 1.f || fy <= 1.f))
interpolation = 1;
callers[interpolation](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff, fx, fy,
static_cast< PtrStepSz<T> >(dst), stream);
}
template void resize_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float fx, float fy, PtrStepSzb dst, int interpolation, cudaStream_t stream);
template<typename T> struct scan_traits{};
template<> struct scan_traits<uchar>
{
typedef float scan_line_type;
};
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |
7057a4872d558a0a4b27f90c915faf00a68a73d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../kernels/iteration_step.cu"
#include "../common/statistics.h"
int main(int argc, char *argv[])
{
CA *d_ca = new CA();
CA *h_ca = new CA();
double *headsWrite;
h_ca->heads = new double[ROWS * COLS]();
h_ca->Sy = new double[ROWS * COLS]();
h_ca->K = new double[ROWS * COLS]();
h_ca->sources = new double[ROWS * COLS]();
initializeCA(h_ca);
allocateMemory(d_ca, headsWrite);
copyDataFromCpuToGpu(h_ca, d_ca, headsWrite);
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
const int blockCount = ceil((double) (ROWS * COLS) / (BLOCK_SIZE * BLOCK_SIZE));
int gridSize = ceil(sqrt(blockCount));
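// lay the blocks out on a square grid just large enough that
// gridSize^2 * BLOCK_SIZE^2 covers all ROWS * COLS cells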
dim3 gridDims(gridSize, gridSize);
std::vector<StatPoint> stats;
Timer stepTimer;
stepTimer.start();
for (size_t i{}; i < SIMULATION_ITERATIONS; ++i)
{
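// one of the kernel variants below is selected at compile time by the
// STANDARD / HYBRID / SHARED build flag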
#ifdef STANDARD
hipLaunchKernelGGL(( kernels::standard_step) , dim3(gridDims), dim3(blockSize) , 0, 0, *d_ca, headsWrite);
#endif
#ifdef HYBRID
hipLaunchKernelGGL(( kernels::hybrid_step) , dim3(gridDims), dim3(blockSize) , 0, 0, *d_ca, headsWrite);
#endif
#ifdef SHARED
hipLaunchKernelGGL(( kernels::shared_step) , dim3(gridDims), dim3(blockSize) , 0, 0, *d_ca, headsWrite);
#endif
ERROR_CHECK(hipDeviceSynchronize());
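// double buffering: swap the read buffer (d_ca->heads) and the write buffer
// (headsWrite) so the next iteration reads the heads just computed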
double *tmpHeads = d_ca->heads;
d_ca->heads = headsWrite;
headsWrite = tmpHeads;
if (i % STATISTICS_WRITE_FREQ == STATISTICS_WRITE_FREQ - 1)
{
stepTimer.stop();
StatPoint stat{};
stat.stepTime = stepTimer.elapsedNanoseconds();
stats.push_back(stat);
stepTimer.start();
}
}
if (WRITE_OUTPUT_TO_FILE)
{
copyDataFromGpuToCpu(h_ca, d_ca);
saveHeadsInFile(h_ca->heads, argv[0]);
}
if (WRITE_STATISTICS_TO_FILE)
{
writeStatisticsToFile(stats, argv[0]);
}
freeAllocatedMemory(d_ca, headsWrite);
}
| 7057a4872d558a0a4b27f90c915faf00a68a73d5.cu | #include "../kernels/iteration_step.cu"
#include "../common/statistics.h"
int main(int argc, char *argv[])
{
CA *d_ca = new CA();
CA *h_ca = new CA();
double *headsWrite;
h_ca->heads = new double[ROWS * COLS]();
h_ca->Sy = new double[ROWS * COLS]();
h_ca->K = new double[ROWS * COLS]();
h_ca->sources = new double[ROWS * COLS]();
initializeCA(h_ca);
allocateMemory(d_ca, headsWrite);
copyDataFromCpuToGpu(h_ca, d_ca, headsWrite);
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
const int blockCount = ceil((double) (ROWS * COLS) / (BLOCK_SIZE * BLOCK_SIZE));
int gridSize = ceil(sqrt(blockCount));
dim3 gridDims(gridSize, gridSize);
std::vector<StatPoint> stats;
Timer stepTimer;
stepTimer.start();
for (size_t i{}; i < SIMULATION_ITERATIONS; ++i)
{
#ifdef STANDARD
kernels::standard_step <<< gridDims, blockSize >>>(*d_ca, headsWrite);
#endif
#ifdef HYBRID
kernels::hybrid_step <<< gridDims, blockSize >>>(*d_ca, headsWrite);
#endif
#ifdef SHARED
kernels::shared_step <<< gridDims, blockSize >>>(*d_ca, headsWrite);
#endif
ERROR_CHECK(cudaDeviceSynchronize());
double *tmpHeads = d_ca->heads;
d_ca->heads = headsWrite;
headsWrite = tmpHeads;
if (i % STATISTICS_WRITE_FREQ == STATISTICS_WRITE_FREQ - 1)
{
stepTimer.stop();
StatPoint stat{};
stat.stepTime = stepTimer.elapsedNanoseconds();
stats.push_back(stat);
stepTimer.start();
}
}
if (WRITE_OUTPUT_TO_FILE)
{
copyDataFromGpuToCpu(h_ca, d_ca);
saveHeadsInFile(h_ca->heads, argv[0]);
}
if (WRITE_STATISTICS_TO_FILE)
{
writeStatisticsToFile(stats, argv[0]);
}
freeAllocatedMemory(d_ca, headsWrite);
}
|
6d39e377ee43e4087deaaf7dbc3adcaafb957439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tiger/layers/neuron/dropout_layer.hpp"
#include "tiger/utils/math_function.hpp"
namespace tiger{
template <typename Dtype>
__global__ void dropout_forward_kernel(const int n, const Dtype* in, const unsigned int* mask,
const unsigned int threshold, const float scale, Dtype* out){
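// inverted dropout: zero the units whose random mask value falls at or below
// the threshold and rescale the survivors by `scale`, keeping the expected
// activation consistent between train and test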
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
out[i] = in[i] * (mask[i] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>* >& bottom,
const vector<Blob<Dtype>* >& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if(this->phase_ == TRAIN){
unsigned int* mask = static_cast<unsigned int*>(this->rand_vec_.mutable_gpu_data());
tiger_gpu_rng_uniform(count, mask);
hipLaunchKernelGGL(( dropout_forward_kernel<Dtype>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, uint_thres_, scale_, top_data);
}
else{
// top_data and bottom_data are device pointers; at test time the input is
// passed straight through with a device-to-device copy
hipMemcpy(top_data, bottom_data, count * sizeof(Dtype), hipMemcpyDeviceToDevice);
}
}
template <typename Dtype>
__global__ void dropout_backward_kernel(const int n, const Dtype* in, const unsigned int* mask,
const unsigned int threshold, const float scale, Dtype* out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
out[i] = in[i] * (mask[i] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>* >& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>* >& bottom){
if(!propagate_down[0]){
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const unsigned int* mask = rand_vec_.gpu_data();
const int count = bottom[0]->count();
if(this->phase_ == TRAIN){
hipLaunchKernelGGL(( dropout_backward_kernel<Dtype>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count,
top_diff, mask, uint_thres_, scale_, bottom_diff);
}
else{
// top_diff and bottom_diff are device pointers; pass the gradient through
// with a device-to-device copy
hipMemcpy(bottom_diff, top_diff, count * sizeof(Dtype), hipMemcpyDeviceToDevice);
}
}
template class DropoutLayer<float>;
template class DropoutLayer<double>;
}
| 6d39e377ee43e4087deaaf7dbc3adcaafb957439.cu | #include "tiger/layers/neuron/dropout_layer.hpp"
#include "tiger/utils/math_function.hpp"
namespace tiger{
template <typename Dtype>
__global__ void dropout_forward_kernel(const int n, const Dtype* in, const unsigned int* mask,
const unsigned int threshold, const float scale, Dtype* out){
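// inverted dropout: zero the units whose random mask value falls at or below
// the threshold and rescale the survivors by `scale`, keeping the expected
// activation consistent between train and test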
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
out[i] = in[i] * (mask[i] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>* >& bottom,
const vector<Blob<Dtype>* >& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if(this->phase_ == TRAIN){
unsigned int* mask = static_cast<unsigned int*>(this->rand_vec_.mutable_gpu_data());
tiger_gpu_rng_uniform(count, mask);
dropout_forward_kernel<Dtype><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
}
else{
// top_data and bottom_data are device pointers; at test time the input is
// passed straight through with a device-to-device copy
cudaMemcpy(top_data, bottom_data, count * sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
}
template <typename Dtype>
__global__ void dropout_backward_kernel(const int n, const Dtype* in, const unsigned int* mask,
const unsigned int threshold, const float scale, Dtype* out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
out[i] = in[i] * (mask[i] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>* >& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>* >& bottom){
if(!propagate_down[0]){
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const unsigned int* mask = rand_vec_.gpu_data();
const int count = bottom[0]->count();
if(this->phase_ == TRAIN){
dropout_backward_kernel<Dtype><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count,
top_diff, mask, uint_thres_, scale_, bottom_diff);
}
else{
// top_diff and bottom_diff are device pointers; pass the gradient through
// with a device-to-device copy
cudaMemcpy(bottom_diff, top_diff, count * sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
}
template class DropoutLayer<float>;
template class DropoutLayer<double>;
}
|
39a7b850e92c2a1b895c87364e3f1caa071bc5bc.hip | // !!! This is a file automatically generated by hipify!!!
// ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl;
exit(1);
}
}
// perform the actual computation on GPU
__device__
void addArrays(float* d_a, float* d_b, float* d_c, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n) d_c[i] = d_a[i] + d_b[i];
}
// kernel to call from the main function
__global__
void addArraysKernel(float* d_a, float* d_b, float* d_c, int n)
{
addArrays(d_a, d_b, d_c, n);
}
int main(int argc, char **argv)
{
// alloc and init input arrays on host (CPU)
int n = 20;
float *a = new float[n];
float *b = new float[n];
float *c = new float[n];
for(int i=0; i<n; i++)
{
a[i] = i;
b[i] = (i%5)+1;
c[i] = 0;
}
// CPU computation
for(int i=0; i<n; i++) c[i] = a[i] + b[i];
// print result
cout << "CPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
cout << endl;
// init c
for(int i=0; i<n; i++) c[i] = 0;
// GPU computation
// allocate memory on GPU
size_t nbytes = (size_t)(n)*sizeof(float);
float* d_a = NULL;
float* d_b = NULL;
float* d_c = NULL;
hipMalloc(&d_a, nbytes); CUDA_CHECK;
hipMalloc(&d_b, nbytes); CUDA_CHECK;
hipMalloc(&d_c, nbytes); CUDA_CHECK;
// CPU => GPU
hipMemcpy(d_a, a, (size_t)(n)*sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_b, b, (size_t)(n)*sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_c, c, (size_t)(n)*sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
// launch kernel
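// one thread per element: the grid size is rounded up so all n elements are
// covered; surplus threads are masked off by the bounds check in the kernel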
dim3 block = dim3(128,1,1);
dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
hipLaunchKernelGGL(( addArraysKernel) , dim3(grid),dim3(block), 0, 0, d_a, d_b, d_c, n);
// GPU => CPU
hipMemcpy(a, d_a, (size_t)(n)*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(b, d_b, (size_t)(n)*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(c, d_c, (size_t)(n)*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
// print result
cout << "GPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
cout << endl;
// free CPU arrays
delete[] a;
delete[] b;
delete[] c;
// free GPU arrays
hipFree(d_a); CUDA_CHECK;
hipFree(d_b); CUDA_CHECK;
hipFree(d_c); CUDA_CHECK;
}
| 39a7b850e92c2a1b895c87364e3f1caa071bc5bc.cu | // ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
exit(1);
}
}
// perform the actual computation on GPU
__device__
void addArrays(float* d_a, float* d_b, float* d_c, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n) d_c[i] = d_a[i] + d_b[i];
}
// kernel to call from the main function
__global__
void addArraysKernel(float* d_a, float* d_b, float* d_c, int n)
{
addArrays(d_a, d_b, d_c, n);
}
int main(int argc, char **argv)
{
// alloc and init input arrays on host (CPU)
int n = 20;
float *a = new float[n];
float *b = new float[n];
float *c = new float[n];
for(int i=0; i<n; i++)
{
a[i] = i;
b[i] = (i%5)+1;
c[i] = 0;
}
// CPU computation
for(int i=0; i<n; i++) c[i] = a[i] + b[i];
// print result
cout << "CPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
cout << endl;
// init c
for(int i=0; i<n; i++) c[i] = 0;
// GPU computation
// allocate memory on GPU
size_t nbytes = (size_t)(n)*sizeof(float);
float* d_a = NULL;
float* d_b = NULL;
float* d_c = NULL;
cudaMalloc(&d_a, nbytes); CUDA_CHECK;
cudaMalloc(&d_b, nbytes); CUDA_CHECK;
cudaMalloc(&d_c, nbytes); CUDA_CHECK;
// CPU => GPU
cudaMemcpy(d_a, a, (size_t)(n)*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_b, b, (size_t)(n)*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_c, c, (size_t)(n)*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
// launch kernel
dim3 block = dim3(128,1,1);
dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
addArraysKernel <<<grid,block>>> (d_a, d_b, d_c, n);
// GPU => CPU
cudaMemcpy(a, d_a, (size_t)(n)*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(b, d_b, (size_t)(n)*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(c, d_c, (size_t)(n)*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
// print result
cout << "GPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
cout << endl;
// free CPU arrays
delete[] a;
delete[] b;
delete[] c;
// free GPU arrays
cudaFree(d_a); CUDA_CHECK;
cudaFree(d_b); CUDA_CHECK;
cudaFree(d_c); CUDA_CHECK;
}
|
0304507017ad8fec09662f0f3f4f1f853a7e530f.hip | // !!! This is a file automatically generated by hipify!!!
/*
rta.cu
Copyright (c) Michael Strickland
GNU General Public License (GPLv3)
See detailed text in license directory
*/
#include <iostream>
#include <iomanip>
#include <fstream>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <stdio.h>
#include <gsl/gsl_sf_hyperg.h>
#include <gsl/gsl_sf_gamma.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include <helper_functions.h>
using namespace std;
#include "rta.h"
#include "outputroutines.h"
#include "paramreader.h"
#include "memory.h"
// defines
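// TIDX(i,j) flattens an ordered pair with j <= i into the packed
// lower-triangular storage used for the damping (D) and H lookup tables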
#define TIDX(i,j) (j + i*(i + 1)/2)
#define BLOCKSIZE1 256
#define BLOCKSIZE2 128
// constants that are shared with the GPU
__constant__ int NUM;
__constant__ double DTAU,A_0,T_0,EB;
__constant__ int N,M;
__constant__ double PZ,PT;
__constant__ double M_PI;
__constant__ double hbarc;
// these global vars are initialized from parameters file
// defaults set here are overridden by that file
int num = 100, maxiters = 10, update = 10, snapupdate = 20;
double fpieb = 1; // 4 Pi eta / S
double t0 = 0.25; // initial time in fm/c
double tf = 20; // final time in fm/c
double T0 = 0.6; // initial temperature in GeV
double a0 = 1; // initial anisotropy a0 = 1/sqrt(1+xi0)
// time step
double dt;
// this holds the current values of T^4
double *t4;
// this holds the updated values of T^4
double *T4;
// this holds the integration abscissae (timeGrid)
double *t;
// parameters for moment computation
int computeMoments=0, maxN=4, maxM=4;
// this will hold the final solution for the distribution function f for a fixed w and pt
double *f;
int computeDist=0, numPZ=40, numPT=40, fStep=1;
double maxPT=2, maxPZ=2;
// these hold the values of hnm and the initial value array for the general moment equation
double *hnm,*hnm0;
// these are pointers for the device memory
double *dev_t4, *dev_T4, *dev_time, *dev_d, *dev_h, *dev_hnm, *dev_hnm0, *dev_m, *dev_f;
/*----------------------------------------------------------------------------------------------------*/
// Special functions
/*----------------------------------------------------------------------------------------------------*/
__device__ double H(double y) {
if (y==1) return 2;
if (fabs(y)<1) return y*(fabs(y) + asin(sqrt(1-y*y))/sqrt(1-y*y));
if (fabs(y)>1) return y*(fabs(y) + asinh(sqrt(y*y-1))/sqrt(y*y-1));
return 0;
}
double hostH(double y) {
if (y==1) return 2;
if (fabs(y)<1) return y*(fabs(y) + asin(sqrt(1-y*y))/sqrt(1-y*y));
if (fabs(y)>1) return y*(fabs(y) + asinh(sqrt(y*y-1))/sqrt(y*y-1));
return 0;
}
double my2F1(double a, double b, double c, double z)
{
if (fabs(z)<=1) return gsl_sf_hyperg_2F1(a,b,c,z);
if (z<-1) return pow(1-z,-a)*gsl_sf_hyperg_2F1(a,c-b,c,z/(z-1));
else { cout << "my2F1 err" << endl; exit(-1); }
}
double H(int n, int m, double y) {
if (n==1) return 2*pow(y,2*m+1)/(2*m+1);
if (y==0) return 0;
if (y==1) return 2./(2*m+1);
return 2*pow(y,2*m+1)*my2F1(0.5+m, 0.5*(1-n), 1.5+m, 1-y*y)/(2*m+1);
}
/*----------------------------------------------------------------------------------------------------*/
// Damping function
/*----------------------------------------------------------------------------------------------------*/
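// D(i2,i1) approximates the damping factor
//   exp( - Integral_{t[i1]}^{t[i2]} T(t') dt' / (5 * EB * hbarc) ),  with EB = eta/s,
// using the trapezoidal rule; the extra factor lt[j] converts the uniform
// step in ln(t) into dt' = t' d(ln t')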
__device__ double D(int i2, int i1, double *lt4, double *lt) {
if (i1==i2) return 1;
double res = 0, w = 1;
for (int j = i1; j <= i2; j++) {
if (j==i1 || j==i2) w = 0.5;
else w = 1.0;
res += w*pow(lt4[j],0.25)*lt[j];
}
res *= DTAU/hbarc/EB/5.;
return exp(-res);
}
/*----------------------------------------------------------------------------------------------------*/
// Device routines for T^4 iterative computation
/*----------------------------------------------------------------------------------------------------*/
// right hand side for t4 update
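// two contributions: the damped free-streaming term seeded by the initial
// condition (T_0, A_0), plus a trapezoidal integral over the history that
// couples T^4(t_i) to D(t_i,t') H(t'/t_i) T^5(t') at earlier times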
__device__ double rhs(int i, double *lt4, double *lt, double *ld, double *lh) {
double res = 0;
double w = 1;
// second term
if (i>0) {
for (int ip = 0; ip <= i; ip++) {
if (ip==0 || ip==i) w = 0.5;
else w = 1.0;
res += w*ld[TIDX(i,ip)]*lh[TIDX(i,ip)]*pow(lt4[ip],1.25)*lt[ip];
}
res *= DTAU/hbarc/EB/10.;
}
// first term
res += ld[TIDX(i,0)]*pow(T_0,4.)*H(A_0*lt[0]/lt[i])/H(A_0);
// return result
return res;
}
// makes one iteration
__global__ void makeIteration(double *lt4, double *lT4, double *lt, double *ld, double *lh) {
//printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid<NUM) {
lT4[tid] = rhs(tid,lt4,lt,ld,lh);
tid += blockDim.x * gridDim.x;
}
}
// load damping function
__global__ void loadDampingFunction(double *lt4, double *lt, double *ld) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < NUM*(NUM+1)/2) {
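// invert the triangular-number flattening: recover (row, column) with
// column <= row from the 1D index tid (the inverse of the TIDX macro)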
int row = floor(-0.5 + sqrt(0.25 + 2 * tid));
int triangularNumber = row * (row + 1) / 2;
int column = tid - triangularNumber;
ld[tid] = D(row,column,lt4,lt);
tid += blockDim.x * gridDim.x;
}
}
// load H function
__global__ void loadHFunction(double *lt, double *lh) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < NUM*(NUM+1)/2) {
int row = floor(-0.5 + sqrt(0.25 + 2 * tid));
int triangularNumber = row * (row + 1) / 2;
int column = tid - triangularNumber;
lh[tid] = H(lt[column]/lt[row]);
tid += blockDim.x * gridDim.x;
}
}
/*----------------------------------------------------------------------------------------------------*/
// Device routines for general moment computation
/*----------------------------------------------------------------------------------------------------*/
// right hand side for mnm update
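// r = n + 2m + 2 is the power of the effective temperature that the general
// moment M^{nm} scales with in equilibrium (see computeEQMoment below)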
__device__ double rhsMNM(int i, double *lt4, double *lt, double *ld, double *lhnm, double *lhnm0, double lH0) {
double res = 0;
double w = 1;
int r = N + 2*M + 2;
// second term
if (i>0) {
for (int ip = 0; ip <= i; ip++) {
if (ip==0 || ip==i) w = 0.5;
else w = 1.0;
res += w*ld[TIDX(i,ip)]*lhnm[TIDX(i,ip)]*pow(lt4[ip],0.25*(1+r))*lt[ip];
}
res *= DTAU/hbarc/EB/5.;
}
// first term
res += pow(2.,0.25*r)*ld[TIDX(i,0)]*pow(T_0,r)*lhnm0[i]/pow(lH0,0.25*r);
// return result
return tgamma((double)r)*res/2/2/M_PI/M_PI;
}
// makes one iteration; this is a "kernel"
__global__ void computeMNM(double *lm, double *lt4, double *lt, double *ld, double *lhnm, double *lh, double *lhnm0, double H0) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid<NUM) {
lm[tid] = rhsMNM(tid,lt4,lt,ld,lhnm,lhnm0,H0);
tid += blockDim.x * gridDim.x;
}
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for calculating f
/*----------------------------------------------------------------------------------------------------*/
// right hand side for f update
__device__ double rhsF(int i, double *lt4, double *lt, double *ld) {
double res = 0, feq=0, T=1;
double w = 1;
// second term
if (i>0) {
for (int ip = 0; ip <= i; ip++) {
if (ip==0 || ip==i) w = 0.5;
else w = 1.0;
T = pow(lt4[ip],0.25);
feq = exp(-sqrt(PZ*PZ+PT*PT));
res += w*ld[TIDX(i,ip)]*feq*T*lt[ip];
}
res *= DTAU/hbarc/EB/5.;
}
// first term
T = pow(lt4[i],0.25);
double l0 = pow(2./H(A_0),0.25)*T_0;
double f0 = exp(-sqrt(pow(PZ*lt[i]/(A_0*lt[0]),2) + PT*PT)/(l0/T));
res += ld[TIDX(i,0)]*f0;
// return result
return res;
}
// makes one iteration; this is a "kernel"
__global__ void computeF(double *lf, double *lt4, double *lt, double *ld) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid<NUM) {
lf[tid] = rhsF(tid,lt4,lt,ld);
tid += blockDim.x * gridDim.x;
}
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for T^4 iterations
/*----------------------------------------------------------------------------------------------------*/
void makeIterations(double *lt4, double *lT4, double *lt, double *ld, double *lh) {
outputMeasurements(0);
outputTemperatureSnapshot(t4,0,"T");
// load H function
hipLaunchKernelGGL(( loadHFunction), dim3(num*(num+1)/2/BLOCKSIZE1),dim3(BLOCKSIZE1), 0, 0, lt,lh);
checkCudaErrors(hipDeviceSynchronize());
for (int i=1; i<=maxiters;i++) {
// load D function
hipLaunchKernelGGL(( loadDampingFunction), dim3(num*(num+1)/2/BLOCKSIZE1),dim3(BLOCKSIZE1), 0, 0, lt4,lt,ld);
hipDeviceSynchronize();
// make an iteration
hipLaunchKernelGGL(( makeIteration), dim3(num/BLOCKSIZE2),dim3(BLOCKSIZE2), 0, 0, lt4,lT4,lt,ld,lh);
hipDeviceSynchronize();
// swap pointers to make old <-> new
swapPointers(&lt4,&lT4);
// output some stuff if appropriate
if (i%update==0) {
hipMemcpy(t4, lt4, num*sizeof(double), hipMemcpyDeviceToHost);
outputMeasurements(i);
}
if (i%snapupdate==0) {
hipMemcpy(t4, lt4, num*sizeof(double), hipMemcpyDeviceToHost);
outputTemperatureSnapshot(t4,i,"T");
}
}
// load the device d function based on final result and copy t4 back to host for subsequent use
hipLaunchKernelGGL(( loadDampingFunction), dim3(num*(num+1)/2/BLOCKSIZE1),dim3(BLOCKSIZE1), 0, 0, lt4,lt,ld);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(t4, lt4, num*sizeof(double), hipMemcpyDeviceToHost));
}
// loads integration abscissae
void loadTimeGrid() {
cout << "==> Loading time grid" << endl;
double ltf = log(tf);
double lt0 = log(t0);
dt = (ltf-lt0)/(num-1);
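// the abscissae are spaced uniformly in ln(t) between t0 and tf, so dt is a
// step in log-time (this is the DTAU used on the device)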
for (int i = 0; i < num; i++) t[i] = exp(lt0 + i*dt);
}
// initializes t4 array
void initializeT4() {
cout << "==> Initializing T^4 array" << endl;
t4[0] = T0*T0*T0*T0;
for (int i=1; i < num; i++) {
t4[i] = T0*T0*T0*T0*pow(t0/t[i],4./3.);
}
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for general moment computation
/*----------------------------------------------------------------------------------------------------*/
// initializes hnm array
void setupHNM(int n, int m) {
for (int idx=0; idx < num*(num+1)/2; idx++) {
int row = floor(-0.5 + sqrt(0.25 + 2 * idx));
int triangularNumber = row * (row + 1) / 2;
int column = idx - triangularNumber;
hnm[idx] = H(n,m,t[column]/t[row]);
}
for (int idx=0; idx < num; idx++)
hnm0[idx] = H(n,m,t[0]*a0/t[idx]);
}
// computes a general moment based on the current iterations results for t4
double* computeMoment(int n, int m) {
cout << "==> Computing M(" << n << "," << m << ")" << endl;
hipMemcpyToSymbol(N, &n, sizeof(int));
hipMemcpyToSymbol(M, &m, sizeof(int));
setupHNM(n,m);
hipMemcpy(dev_hnm, hnm, sizeof(double)*num*(num+1)/2, hipMemcpyHostToDevice); // transfer to device
hipMemcpy(dev_hnm0, hnm0, sizeof(double)*num, hipMemcpyHostToDevice); // transfer to device
double *lm;
lm = allocate1DArray();
hipLaunchKernelGGL(( computeMNM), dim3(num/BLOCKSIZE2),dim3(BLOCKSIZE2), 0, 0, dev_m, dev_t4, dev_time, dev_d, dev_hnm, dev_h, dev_hnm0, hostH(a0));
hipMemcpy(lm, dev_m, num*sizeof(double), hipMemcpyDeviceToHost);
return lm;
}
// computes a general moment based on an equilbrium form with t4
inline double computeEQMoment(int n, int m, int i) {
int r = n+2*m+2;
return gsl_sf_gamma(r)*pow(t4[i],0.25*r)*2/(2*m+1)/2/2/M_PI/M_PI;
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for f computation
/*----------------------------------------------------------------------------------------------------*/
// computes f based on the current iterations results for t4
double* computeDistributionFunction(double pz, double pt) {
//cout << "==> Computing f(" << pz << "," << pt << ")" << endl;
hipMemcpyToSymbol(PZ, &pz, sizeof(double));
hipMemcpyToSymbol(PT, &pt, sizeof(double));
double *lf;
lf = allocate1DArray();
hipLaunchKernelGGL(( computeF), dim3(num/BLOCKSIZE2),dim3(BLOCKSIZE2), 0, 0, dev_f, dev_t4, dev_time, dev_d);
hipMemcpy(lf, dev_f, num*sizeof(double), hipMemcpyDeviceToHost);
return lf;
}
/*----------------------------------------------------------------------------------------------------*/
// Main routine
/*----------------------------------------------------------------------------------------------------*/
int main(int argc, char** argv) {
const double m_pi = 4.0 * atan(1.0);
char fname[20]; // for later use
print_line();
// read parameters from file and command line
readParametersFromFile("params.txt",1);
if (argc>1) {
print_line();
cout << "Parameters from commandline" << endl;
print_line();
readParametersFromCommandLine(argc,argv,1);
}
// perform any processing of parameters necessary
processParameters();
print_line();
print_line();
print_line();
//setup
allocateMemory();
loadTimeGrid();
initializeT4();
print_line();
// copy grid and initial conditions to device
checkCudaErrors(hipMemcpy(dev_t4, t4, sizeof(double)*num, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dev_T4, T4, sizeof(double)*num, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dev_time, t, sizeof(double)*num, hipMemcpyHostToDevice));
print_line();
// copy parameters to device memory
checkCudaErrors(hipMemcpyToSymbol(NUM, &num, sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(A_0, &a0, sizeof(double)));
checkCudaErrors(hipMemcpyToSymbol(T_0, &T0, sizeof(double)));
double eb = fpieb/m_pi/4.;
checkCudaErrors(hipMemcpyToSymbol(EB, &eb, sizeof(double)));
checkCudaErrors(hipMemcpyToSymbol(DTAU, &dt, sizeof(double)));
print_line();
// Copy to grid global constants
checkCudaErrors(hipMemcpyToSymbol(M_PI, &m_pi, sizeof(double)));
checkCudaErrors(hipMemcpyToSymbol(hbarc, &HBARC, sizeof(double)));
// print some stuff
print_line();
cout.width(dwidth); cout << "iteration";
cout.width(dwidth); cout << "T[0]";
cout.width(dwidth); cout << "T[num/2]";
cout.width(dwidth); cout << "T[num-1]";
cout << endl;
print_line();
/*----------------------------------------------------------------------------------------------------*/
// Iterations
/*----------------------------------------------------------------------------------------------------*/
cout << "4 pi eta / S: " << fpieb << endl;
makeIterations(dev_t4, dev_T4, dev_time, dev_d, dev_h);
/*----------------------------------------------------------------------------------------------------*/
// Compute some things with the solution
/*----------------------------------------------------------------------------------------------------*/
print_line();
double *ed, *pl,*pt,*plopt;
ed = allocate1DArray();
pl = computeMoment(0,1);
pt = allocate1DArray();
plopt = allocate1DArray();
for (int i=0; i<num; i++) {
ed[i] = 3*t4[i]/m_pi/m_pi;
pt[i] = 0.5*(ed[i] - pl[i]);
plopt[i] = pl[i]/pt[i];
cout << ed[i] << "\t" << pl[i] << "\t" << pt[i] << endl;
}
outputArray(ed,"ed");
outputArray(pl,"pl");
outputArray(pt,"pt");
outputArray(plopt,"pratio");
// compute distribution function
if (computeDist==1) {
print_line();
cout << "==> Computing f ";
double ***f3DArray;
f3DArray = allocate3DArray(num/fStep,numPZ,numPT);
double dpz = maxPZ/(numPZ-1);
double dpt = maxPT/(numPT-1);
for (int i=0; i<numPZ; i++) {
for (int j=0; j<numPT; j++) {
double *f;
f = computeDistributionFunction(i*dpz,j*dpt);
for (int k=0; k<num/fStep; k++) f3DArray[k][i][j] = f[k*fStep]; // load into f array for later binary output
free1DArray(f);
}
cout << "." << std::flush;
}
cout << endl;
outputDistribution(f3DArray); // output f in binary format
free3DArray(f3DArray,num/fStep,numPZ,numPT);
}
if (computeMoments==1) {
// loop over moments
print_line();
for (int n=0; n<=maxN; n++) {
for (int m=0; m<=maxM; m++) {
double *mom;
mom = computeMoment(n,m);
sprintf(fname,"moms/m-%d-%d",n,m);
outputArray(mom,fname);
for (int i=0; i<num; i++) mom[i] /= computeEQMoment(n,m,i);
sprintf(fname,"moms/m-%d-%d-scaled",n,m);
outputScaledArray(mom,t4,5*eb,fname);
free1DArray(mom);
}
}
}
free1DArray(ed);
free1DArray(pl);
free1DArray(pt);
free1DArray(plopt);
/*----------------------------------------------------------------------------------------------------*/
// print some more stuff
print_line();
cout << "Done.\n";
print_line();
// free memory
freeMemory();
return 0;
}
| 0304507017ad8fec09662f0f3f4f1f853a7e530f.cu | /*
rta.cu
Copyright (c) Michael Strickland
GNU General Public License (GPLv3)
See detailed text in license directory
*/
#include <iostream>
#include <iomanip>
#include <fstream>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <stdio.h>
#include <gsl/gsl_sf_hyperg.h>
#include <gsl/gsl_sf_gamma.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include <helper_functions.h>
using namespace std;
#include "rta.h"
#include "outputroutines.h"
#include "paramreader.h"
#include "memory.h"
// defines
#define TIDX(i,j) (j + i*(i + 1)/2)
#define BLOCKSIZE1 256
#define BLOCKSIZE2 128
// constants that are shared with the GPU
__constant__ int NUM;
__constant__ double DTAU,A_0,T_0,EB;
__constant__ int N,M;
__constant__ double PZ,PT;
__constant__ double M_PI;
__constant__ double hbarc;
// these global vars are initialized from parameters file
// defaults set here are overridden by that file
int num = 100, maxiters = 10, update = 10, snapupdate = 20;
double fpieb = 1; // 4 Pi eta / S
double t0 = 0.25; // initial time in fm/c
double tf = 20; // final time in fm/c
double T0 = 0.6; // initial temperature in GeV
double a0 = 1; // initial anisotropy a0 = 1/sqrt(1+xi0)
// time step
double dt;
// this holds the current values of T^4
double *t4;
// this holds the updated values of T^4
double *T4;
// this holds the integration abscissae (timeGrid)
double *t;
// parameters for moment computation
int computeMoments=0, maxN=4, maxM=4;
// this will hold the final solution for the distribution function f for a fixed w and pt
double *f;
int computeDist=0, numPZ=40, numPT=40, fStep=1;
double maxPT=2, maxPZ=2;
// these hold the values of hnm and the initial value array for the general moment equation
double *hnm,*hnm0;
// these are pointers for the device memory
double *dev_t4, *dev_T4, *dev_time, *dev_d, *dev_h, *dev_hnm, *dev_hnm0, *dev_m, *dev_f;
/*----------------------------------------------------------------------------------------------------*/
// Special functions
/*----------------------------------------------------------------------------------------------------*/
__device__ double H(double y) {
if (y==1) return 2;
if (fabs(y)<1) return y*(fabs(y) + asin(sqrt(1-y*y))/sqrt(1-y*y));
if (fabs(y)>1) return y*(fabs(y) + asinh(sqrt(y*y-1))/sqrt(y*y-1));
return 0;
}
double hostH(double y) {
if (y==1) return 2;
if (fabs(y)<1) return y*(fabs(y) + asin(sqrt(1-y*y))/sqrt(1-y*y));
if (fabs(y)>1) return y*(fabs(y) + asinh(sqrt(y*y-1))/sqrt(y*y-1));
return 0;
}
double my2F1(double a, double b, double c, double z)
{
if (fabs(z)<=1) return gsl_sf_hyperg_2F1(a,b,c,z);
if (z<-1) return pow(1-z,-a)*gsl_sf_hyperg_2F1(a,c-b,c,z/(z-1));
else { cout << "my2F1 err" << endl; exit(-1); }
}
double H(int n, int m, double y) {
if (n==1) return 2*pow(y,2*m+1)/(2*m+1);
if (y==0) return 0;
if (y==1) return 2./(2*m+1);
return 2*pow(y,2*m+1)*my2F1(0.5+m, 0.5*(1-n), 1.5+m, 1-y*y)/(2*m+1);
}
/*----------------------------------------------------------------------------------------------------*/
// Damping function
/*----------------------------------------------------------------------------------------------------*/
__device__ double D(int i2, int i1, double *lt4, double *lt) {
if (i1==i2) return 1;
double res = 0, w = 1;
for (int j = i1; j <= i2; j++) {
if (j==i1 || j==i2) w = 0.5;
else w = 1.0;
res += w*pow(lt4[j],0.25)*lt[j];
}
res *= DTAU/hbarc/EB/5.;
return exp(-res);
}
/*----------------------------------------------------------------------------------------------------*/
// Device routines for T^4 iterative computation
/*----------------------------------------------------------------------------------------------------*/
// right hand side for t4 update
__device__ double rhs(int i, double *lt4, double *lt, double *ld, double *lh) {
double res = 0;
double w = 1;
// second term
if (i>0) {
for (int ip = 0; ip <= i; ip++) {
if (ip==0 || ip==i) w = 0.5;
else w = 1.0;
res += w*ld[TIDX(i,ip)]*lh[TIDX(i,ip)]*pow(lt4[ip],1.25)*lt[ip];
}
res *= DTAU/hbarc/EB/10.;
}
// first term
res += ld[TIDX(i,0)]*pow(T_0,4.)*H(A_0*lt[0]/lt[i])/H(A_0);
// return result
return res;
}
// makes one iteration
__global__ void makeIteration(double *lt4, double *lT4, double *lt, double *ld, double *lh) {
//printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid<NUM) {
lT4[tid] = rhs(tid,lt4,lt,ld,lh);
tid += blockDim.x * gridDim.x;
}
}
// load damping function
__global__ void loadDampingFunction(double *lt4, double *lt, double *ld) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < NUM*(NUM+1)/2) {
int row = floor(-0.5 + sqrt(0.25 + 2 * tid));
int triangularNumber = row * (row + 1) / 2;
int column = tid - triangularNumber;
ld[tid] = D(row,column,lt4,lt);
tid += blockDim.x * gridDim.x;
}
}
// load H function
__global__ void loadHFunction(double *lt, double *lh) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < NUM*(NUM+1)/2) {
int row = floor(-0.5 + sqrt(0.25 + 2 * tid));
int triangularNumber = row * (row + 1) / 2;
int column = tid - triangularNumber;
lh[tid] = H(lt[column]/lt[row]);
tid += blockDim.x * gridDim.x;
}
}
/*----------------------------------------------------------------------------------------------------*/
// Device routines for general moment computation
/*----------------------------------------------------------------------------------------------------*/
// right hand side for mnm update
__device__ double rhsMNM(int i, double *lt4, double *lt, double *ld, double *lhnm, double *lhnm0, double lH0) {
double res = 0;
double w = 1;
int r = N + 2*M + 2;
// second term
if (i>0) {
for (int ip = 0; ip <= i; ip++) {
if (ip==0 || ip==i) w = 0.5;
else w = 1.0;
res += w*ld[TIDX(i,ip)]*lhnm[TIDX(i,ip)]*pow(lt4[ip],0.25*(1+r))*lt[ip];
}
res *= DTAU/hbarc/EB/5.;
}
// first term
res += pow(2.,0.25*r)*ld[TIDX(i,0)]*pow(T_0,r)*lhnm0[i]/pow(lH0,0.25*r);
// return result
return tgamma((double)r)*res/2/2/M_PI/M_PI;
}
// makes one iteration; this is a "kernel"
__global__ void computeMNM(double *lm, double *lt4, double *lt, double *ld, double *lhnm, double *lh, double *lhnm0, double H0) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid<NUM) {
lm[tid] = rhsMNM(tid,lt4,lt,ld,lhnm,lhnm0,H0);
tid += blockDim.x * gridDim.x;
}
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for calculating f
/*----------------------------------------------------------------------------------------------------*/
// right hand side for f update
__device__ double rhsF(int i, double *lt4, double *lt, double *ld) {
double res = 0, feq=0, T=1;
double w = 1;
// second term
if (i>0) {
for (int ip = 0; ip <= i; ip++) {
if (ip==0 || ip==i) w = 0.5;
else w = 1.0;
T = pow(lt4[ip],0.25);
feq = exp(-sqrt(PZ*PZ+PT*PT));
res += w*ld[TIDX(i,ip)]*feq*T*lt[ip];
}
res *= DTAU/hbarc/EB/5.;
}
// first term
T = pow(lt4[i],0.25);
double l0 = pow(2./H(A_0),0.25)*T_0;
double f0 = exp(-sqrt(pow(PZ*lt[i]/(A_0*lt[0]),2) + PT*PT)/(l0/T));
res += ld[TIDX(i,0)]*f0;
// return result
return res;
}
// makes one iteration; this is a "kernel"
__global__ void computeF(double *lf, double *lt4, double *lt, double *ld) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid<NUM) {
lf[tid] = rhsF(tid,lt4,lt,ld);
tid += blockDim.x * gridDim.x;
}
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for T^4 iterations
/*----------------------------------------------------------------------------------------------------*/
void makeIterations(double *lt4, double *lT4, double *lt, double *ld, double *lh) {
outputMeasurements(0);
outputTemperatureSnapshot(t4,0,"T");
// load H function
loadHFunction<<<num*(num+1)/2/BLOCKSIZE1,BLOCKSIZE1>>>(lt,lh);
checkCudaErrors(cudaDeviceSynchronize());
for (int i=1; i<=maxiters;i++) {
// load D function
loadDampingFunction<<<num*(num+1)/2/BLOCKSIZE1,BLOCKSIZE1>>>(lt4,lt,ld);
cudaDeviceSynchronize();
// make an iteration
makeIteration<<<num/BLOCKSIZE2,BLOCKSIZE2>>>(lt4,lT4,lt,ld,lh);
cudaDeviceSynchronize();
// swap pointers to make old <-> new
swapPointers(&lt4,&lT4);
// output some stuff if appropriate
if (i%update==0) {
cudaMemcpy(t4, lt4, num*sizeof(double), cudaMemcpyDeviceToHost);
outputMeasurements(i);
}
if (i%snapupdate==0) {
cudaMemcpy(t4, lt4, num*sizeof(double), cudaMemcpyDeviceToHost);
outputTemperatureSnapshot(t4,i,"T");
}
}
// load the device d function based on final result and copy t4 back to host for subsequent use
loadDampingFunction<<<num*(num+1)/2/BLOCKSIZE1,BLOCKSIZE1>>>(lt4,lt,ld);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(t4, lt4, num*sizeof(double), cudaMemcpyDeviceToHost));
}
// loads integration abscissae
void loadTimeGrid() {
cout << "==> Loading time grid" << endl;
double ltf = log(tf);
double lt0 = log(t0);
dt = (ltf-lt0)/(num-1);
for (int i = 0; i < num; i++) t[i] = exp(lt0 + i*dt);
}
// initializes t4 array
void initializeT4() {
cout << "==> Initializing T^4 array" << endl;
t4[0] = T0*T0*T0*T0;
for (int i=1; i < num; i++) {
t4[i] = T0*T0*T0*T0*pow(t0/t[i],4./3.);
}
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for general moment computation
/*----------------------------------------------------------------------------------------------------*/
// initializes hnm array
void setupHNM(int n, int m) {
for (int idx=0; idx < num*(num+1)/2; idx++) {
int row = floor(-0.5 + sqrt(0.25 + 2 * idx));
int triangularNumber = row * (row + 1) / 2;
int column = idx - triangularNumber;
hnm[idx] = H(n,m,t[column]/t[row]);
}
for (int idx=0; idx < num; idx++)
hnm0[idx] = H(n,m,t[0]*a0/t[idx]);
}
// computes a general moment based on the current iterations results for t4
double* computeMoment(int n, int m) {
cout << "==> Computing M(" << n << "," << m << ")" << endl;
cudaMemcpyToSymbol(N, &n, sizeof(int));
cudaMemcpyToSymbol(M, &m, sizeof(int));
setupHNM(n,m);
cudaMemcpy(dev_hnm, hnm, sizeof(double)*num*(num+1)/2, cudaMemcpyHostToDevice); // transfer to device
cudaMemcpy(dev_hnm0, hnm0, sizeof(double)*num, cudaMemcpyHostToDevice); // transfer to device
double *lm;
lm = allocate1DArray();
computeMNM<<<num/BLOCKSIZE2,BLOCKSIZE2>>>(dev_m, dev_t4, dev_time, dev_d, dev_hnm, dev_h, dev_hnm0, hostH(a0));
cudaMemcpy(lm, dev_m, num*sizeof(double), cudaMemcpyDeviceToHost);
return lm;
}
// computes a general moment based on an equilbrium form with t4
inline double computeEQMoment(int n, int m, int i) {
int r = n+2*m+2;
return gsl_sf_gamma(r)*pow(t4[i],0.25*r)*2/(2*m+1)/2/2/M_PI/M_PI;
}
/*----------------------------------------------------------------------------------------------------*/
// Host routines for f computation
/*----------------------------------------------------------------------------------------------------*/
// computes f based on the current iterations results for t4
double* computeDistributionFunction(double pz, double pt) {
//cout << "==> Computing f(" << pz << "," << pt << ")" << endl;
cudaMemcpyToSymbol(PZ, &pz, sizeof(double));
cudaMemcpyToSymbol(PT, &pt, sizeof(double));
double *lf;
lf = allocate1DArray();
computeF<<<num/BLOCKSIZE2,BLOCKSIZE2>>>(dev_f, dev_t4, dev_time, dev_d);
cudaMemcpy(lf, dev_f, num*sizeof(double), cudaMemcpyDeviceToHost);
return lf;
}
/*----------------------------------------------------------------------------------------------------*/
// Main routine
/*----------------------------------------------------------------------------------------------------*/
int main(int argc, char** argv) {
const double m_pi = 4.0 * atan(1.0);
char fname[20]; // for later use
print_line();
// read parameters from file and command line
readParametersFromFile("params.txt",1);
if (argc>1) {
print_line();
cout << "Parameters from commandline" << endl;
print_line();
readParametersFromCommandLine(argc,argv,1);
}
// perform any processing of parameters necessary
processParameters();
print_line();
print_line();
print_line();
//setup
allocateMemory();
loadTimeGrid();
initializeT4();
print_line();
// copy grid and initial conditions to device
checkCudaErrors(cudaMemcpy(dev_t4, t4, sizeof(double)*num, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dev_T4, T4, sizeof(double)*num, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dev_time, t, sizeof(double)*num, cudaMemcpyHostToDevice));
print_line();
// copy parameters to device memory
checkCudaErrors(cudaMemcpyToSymbol(NUM, &num, sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(A_0, &a0, sizeof(double)));
checkCudaErrors(cudaMemcpyToSymbol(T_0, &T0, sizeof(double)));
double eb = fpieb/m_pi/4.;
checkCudaErrors(cudaMemcpyToSymbol(EB, &eb, sizeof(double)));
checkCudaErrors(cudaMemcpyToSymbol(DTAU, &dt, sizeof(double)));
print_line();
// Copy to grid global constants
checkCudaErrors(cudaMemcpyToSymbol(M_PI, &m_pi, sizeof(double)));
checkCudaErrors(cudaMemcpyToSymbol(hbarc, &HBARC, sizeof(double)));
// print some stuff
print_line();
cout.width(dwidth); cout << "iteration";
cout.width(dwidth); cout << "T[0]";
cout.width(dwidth); cout << "T[num/2]";
cout.width(dwidth); cout << "T[num-1]";
cout << endl;
print_line();
/*----------------------------------------------------------------------------------------------------*/
// Iterations
/*----------------------------------------------------------------------------------------------------*/
cout << "4 pi eta / S: " << fpieb << endl;
makeIterations(dev_t4, dev_T4, dev_time, dev_d, dev_h);
/*----------------------------------------------------------------------------------------------------*/
// Compute some things with the solution
/*----------------------------------------------------------------------------------------------------*/
print_line();
double *ed, *pl,*pt,*plopt;
ed = allocate1DArray();
pl = computeMoment(0,1);
pt = allocate1DArray();
plopt = allocate1DArray();
for (int i=0; i<num; i++) {
ed[i] = 3*t4[i]/m_pi/m_pi;
pt[i] = 0.5*(ed[i] - pl[i]);
plopt[i] = pl[i]/pt[i];
cout << ed[i] << "\t" << pl[i] << "\t" << pt[i] << endl;
}
outputArray(ed,"ed");
outputArray(pl,"pl");
outputArray(pt,"pt");
outputArray(plopt,"pratio");
// compute distribution function
if (computeDist==1) {
print_line();
cout << "==> Computing f ";
double ***f3DArray;
f3DArray = allocate3DArray(num/fStep,numPZ,numPT);
double dpz = maxPZ/(numPZ-1);
double dpt = maxPT/(numPT-1);
for (int i=0; i<numPZ; i++) {
for (int j=0; j<numPT; j++) {
double *f;
f = computeDistributionFunction(i*dpz,j*dpt);
for (int k=0; k<num/fStep; k++) f3DArray[k][i][j] = f[k*fStep]; // load into f array for later binary output
free1DArray(f);
}
cout << "." << std::flush;
}
cout << endl;
outputDistribution(f3DArray); // output f in binary format
free3DArray(f3DArray,num/fStep,numPZ,numPT);
}
if (computeMoments==1) {
// loop over moments
print_line();
for (int n=0; n<=maxN; n++) {
for (int m=0; m<=maxM; m++) {
double *mom;
mom = computeMoment(n,m);
sprintf(fname,"moms/m-%d-%d",n,m);
outputArray(mom,fname);
for (int i=0; i<num; i++) mom[i] /= computeEQMoment(n,m,i);
sprintf(fname,"moms/m-%d-%d-scaled",n,m);
outputScaledArray(mom,t4,5*eb,fname);
free1DArray(mom);
}
}
}
free1DArray(ed);
free1DArray(pl);
free1DArray(pt);
free1DArray(plopt);
/*----------------------------------------------------------------------------------------------------*/
// print some more stuff
print_line();
cout << "Done.\n";
print_line();
// free memory
freeMemory();
return 0;
}
|
5d7c930564cca9b7cbe024e4025a6274d70fdc18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "utils/utils.h"
#define NMAX (1<<20)
// ~TODO 3~
// Modify the kernel below such that each element of the
// array will now be equal to 0 if it is an even number
// or 1 if it is an odd number
__global__ void kernel_parity_id(int *a, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) a[i] = a[i] % 2;
}
// ~TODO 4~
// Modify the kernel below such that each element will
// be equal to the ID of the BLOCK in which this
// computation takes place.
__global__ void kernel_block_id(int *a, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) a[i] = blockIdx.x;
}
// ~TODO 5~
// Modify the kernel below such that each element will
// be equal to the ID of the THREAD in which this
// computation takes place.
__global__ void kernel_thread_id(int *a, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) a[i] = threadIdx.x;
}
int main(void) {
int nDevices;
// Get the number of CUDA-capable GPU(s)
hipGetDeviceCount(&nDevices);
// ~TODO 1~
// For each device, show some details in the format below,
// then set as active device the first one (assuming there
// is at least one CUDA-capable device). Pay attention to the
// type of the fields in the hipDeviceProp_t structure.
//
// Device number: <i>
// Device name: <name>
// Total memory: <mem>
// Memory Clock Rate (KHz): <mcr>
// Memory Bus Width (bits): <mbw>
//
// Hint: look for hipGetDeviceProperties and hipSetDevice in
// the Cuda Toolkit Documentation.
for (int i = 0; i < nDevices; ++i) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
fprintf(stdout, "Device number: <%d>\n", i);
fprintf(stdout, "\tDevice name: <%s>\n", prop.name);
fprintf(stdout, "\tTotal memory: <%zu>\n", prop.totalGlobalMem);
fprintf(stdout, "\tMemory Clock Rate (KHz): <%d>\n", prop.memoryClockRate);
fprintf(stdout, "\tMemory Bus Width (bits): <%d>\n", prop.memoryBusWidth);
}
// ~TODO 2~
// With information from example_2.cu, allocate an array with
// integers (where a[i] = i). Then, modify the three kernels
// above and execute them using 4 blocks, each with 4 threads.
// Hint: num_elements = block_size * block_no (see example_2)
//
// You can use the fill_array_int(int *a, int n) function (from utils)
// to fill your array as many times you want.
int *host_array;
int *device_array;
hipError_t rc;
host_array = (int *) malloc(NMAX * sizeof(int));
rc = hipMalloc(&device_array, NMAX * sizeof(int));
if (!host_array || !device_array || rc != hipSuccess)
exit(-1);
fill_array_int(host_array, NMAX);
rc = hipMemcpy(device_array, host_array, NMAX * sizeof(int),
hipMemcpyHostToDevice);
if (rc != hipSuccess)
exit(-1);
// ~TODO 3~
// Execute kernel_parity_id kernel and then copy from
// the device to the host; call hipDeviceSynchronize()
// after a kernel execution for safety purposes.
//
// Uncomment the line below to check your results
hipLaunchKernelGGL(( kernel_parity_id), dim3(NMAX / 4), dim3(4), 0, 0, device_array, NMAX);
rc = hipMemcpy(host_array, device_array, NMAX * sizeof(int),
hipMemcpyDeviceToHost);
check_task_1(3, host_array);
// ~TODO 4~
// Execute kernel_block_id kernel and then copy from
// the device to the host;
//
// Uncomment the line below to check your results
hipLaunchKernelGGL(( kernel_block_id), dim3(NMAX / 4), dim3(4), 0, 0, device_array, NMAX);
rc = hipMemcpy(host_array, device_array, NMAX * sizeof(int),
hipMemcpyDeviceToHost);
check_task_1(4, host_array);
// ~TODO 5~
// Execute kernel_thread_id kernel and then copy from
// the device to the host;
//
// Uncomment the line below to check your results
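// one thread per element again: NMAX/4 blocks of 4 threads each (a single
// block cannot hold NMAX threads, so the block size stays at 4)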
hipLaunchKernelGGL(( kernel_thread_id), dim3(NMAX / 4), dim3(4), 0, 0, device_array, NMAX);
rc = hipMemcpy(host_array, device_array, NMAX * sizeof(int),
hipMemcpyDeviceToHost);
check_task_1(5, host_array);
// TODO 6: Free the memory
free(host_array);
hipFree(device_array);
return 0;
}
| 5d7c930564cca9b7cbe024e4025a6274d70fdc18.cu | #include <stdio.h>
#include "utils/utils.h"
#define NMAX (1<<20)
// ~TODO 3~
// Modify the kernel below such that each element of the
// array will now be equal to 0 if it is an even number
// or 1 if it is an odd number
__global__ void kernel_parity_id(int *a, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) a[i] = a[i] % 2;
}
// ~TODO 4~
// Modify the kernel below such that each element will
// be equal to the ID of the BLOCK in which this
// computation takes place.
__global__ void kernel_block_id(int *a, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) a[i] = blockIdx.x;
}
// ~TODO 5~
// Modify the kernel below such that each element will
// be equal to the ID of the THREAD in which this
// computation takes place.
__global__ void kernel_thread_id(int *a, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) a[i] = threadIdx.x;
}
int main(void) {
int nDevices;
// Get the number of CUDA-capable GPU(s)
cudaGetDeviceCount(&nDevices);
// ~TODO 1~
// For each device, show some details in the format below,
// then set as active device the first one (assuming there
// is at least one CUDA-capable device). Pay attention to the
// type of the fields in the cudaDeviceProp structure.
//
// Device number: <i>
// Device name: <name>
// Total memory: <mem>
// Memory Clock Rate (KHz): <mcr>
// Memory Bus Width (bits): <mbw>
//
// Hint: look for cudaGetDeviceProperties and cudaSetDevice in
// the Cuda Toolkit Documentation.
for (int i = 0; i < nDevices; ++i) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
fprintf(stdout, "Device number: <%d>\n", i);
fprintf(stdout, "\tDevice name: <%s>\n", prop.name);
fprintf(stdout, "\tTotal memory: <%zu>\n", prop.totalGlobalMem);
fprintf(stdout, "\tMemory Clock Rate (KHz): <%d>\n", prop.memoryClockRate);
fprintf(stdout, "\tMemory Bus Width (bits): <%d>\n", prop.memoryBusWidth);
}
// ~TODO 2~
// With information from example_2.cu, allocate an array with
// integers (where a[i] = i). Then, modify the three kernels
// above and execute them using 4 blocks, each with 4 threads.
// Hint: num_elements = block_size * block_no (see example_2)
//
// You can use the fill_array_int(int *a, int n) function (from utils)
// to fill your array as many times you want.
int *host_array;
int *device_array;
cudaError_t rc;
host_array = (int *) malloc(NMAX * sizeof(int));
rc = cudaMalloc(&device_array, NMAX * sizeof(int));
if (!host_array || !device_array || rc != cudaSuccess)
exit(-1);
fill_array_int(host_array, NMAX);
rc = cudaMemcpy(device_array, host_array, NMAX * sizeof(int),
cudaMemcpyHostToDevice);
if (rc != cudaSuccess)
exit(-1);
// ~TODO 3~
// Execute kernel_parity_id kernel and then copy from
// the device to the host; call cudaDeviceSynchronize()
// after a kernel execution for safety purposes.
//
// Uncomment the line below to check your results
kernel_parity_id<<<NMAX / 4, 4>>>(device_array, NMAX);
rc = cudaMemcpy(host_array, device_array, NMAX * sizeof(int),
cudaMemcpyDeviceToHost);
check_task_1(3, host_array);
// ~TODO 4~
// Execute kernel_block_id kernel and then copy from
// the device to the host;
//
// Uncomment the line below to check your results
kernel_block_id<<<NMAX / 4, 4>>>(device_array, NMAX);
rc = cudaMemcpy(host_array, device_array, NMAX * sizeof(int),
cudaMemcpyDeviceToHost);
check_task_1(4, host_array);
// ~TODO 5~
// Execute kernel_thread_id kernel and then copy from
// the device to the host;
//
// Uncomment the line below to check your results
kernel_thread_id<<<NMAX / 4, 4>>>(device_array, NMAX);
rc = cudaMemcpy(host_array, device_array, NMAX * sizeof(int),
cudaMemcpyDeviceToHost);
check_task_1(5, host_array);
// TODO 6: Free the memory
free(host_array);
cudaFree(device_array);
return 0;
}
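// The TODO text above asks for a cudaDeviceSynchronize() after each kernel
// launch for safety. A minimal sketch of that pattern (assuming only the
// standard CUDA runtime API) would be, for the first launch:
//
//   kernel_parity_id<<<NMAX / 4, 4>>>(device_array, NMAX);
//   if (cudaGetLastError() != cudaSuccess || cudaDeviceSynchronize() != cudaSuccess)
//       exit(-1);
//   rc = cudaMemcpy(host_array, device_array, NMAX * sizeof(int),
//                   cudaMemcpyDeviceToHost);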
|
67d123b3d60ff3a8176a5036aefed9ceebbf150f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_cellz;
int xdim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_cellz;
int ydim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_cellz;
int xdim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_cellz;
int ydim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_cellz;
int xdim2_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_cellz;
int ydim2_initialise_chunk_kernel_cellz_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_cellz * (y) + \
xdim0_initialise_chunk_kernel_cellz * ydim0_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_cellz * (y) + \
xdim1_initialise_chunk_kernel_cellz * ydim1_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_cellz * (y) + \
xdim2_initialise_chunk_kernel_cellz * ydim2_initialise_chunk_kernel_cellz * \
(z))
// user function
__device__
void
initialise_chunk_kernel_cellz_gpu(const double *vertexz, double *cellz,
double *celldz) {
double d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells;
cellz[OPS_ACC1(0, 0, 0)] =
0.5 * (vertexz[OPS_ACC0(0, 0, 0)] + vertexz[OPS_ACC0(0, 0, 1)]);
celldz[OPS_ACC2(0, 0, 0)] = d_z;
}
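// For example (hypothetical grid values): with grid.zmin = 0.0, grid.zmax = 10.0
// and grid.z_cells = 100, d_z = 0.1, and each cell centre in z is the average of
// the vertex below it and the vertex above it.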
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_cellz(const double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim0_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_cellz *
ydim0_initialise_chunk_kernel_cellz;
arg1 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim1_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim1_initialise_chunk_kernel_cellz *
ydim1_initialise_chunk_kernel_cellz;
arg2 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim2_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_cellz *
ydim2_initialise_chunk_kernel_cellz;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_cellz_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_cellz(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 54))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(54, "initialise_chunk_kernel_cellz");
OPS_kernels[54].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_cellz_h ||
ydim0 != ydim0_initialise_chunk_kernel_cellz_h ||
xdim1 != xdim1_initialise_chunk_kernel_cellz_h ||
ydim1 != ydim1_initialise_chunk_kernel_cellz_h ||
xdim2 != xdim2_initialise_chunk_kernel_cellz_h ||
ydim2 != ydim2_initialise_chunk_kernel_cellz_h) {
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_cellz, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_cellz_h = xdim0;
hipMemcpyToSymbol(ydim0_initialise_chunk_kernel_cellz, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_cellz_h = ydim0;
hipMemcpyToSymbol(xdim1_initialise_chunk_kernel_cellz, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_cellz_h = xdim1;
hipMemcpyToSymbol(ydim1_initialise_chunk_kernel_cellz, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_cellz_h = ydim1;
hipMemcpyToSymbol(xdim2_initialise_chunk_kernel_cellz, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_cellz_h = xdim2;
hipMemcpyToSymbol(ydim2_initialise_chunk_kernel_cellz, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_cellz_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_cellz), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[54].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
| 67d123b3d60ff3a8176a5036aefed9ceebbf150f.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_cellz;
int xdim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_cellz;
int ydim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_cellz;
int xdim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_cellz;
int ydim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_cellz;
int xdim2_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_cellz;
int ydim2_initialise_chunk_kernel_cellz_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_cellz * (y) + \
xdim0_initialise_chunk_kernel_cellz * ydim0_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_cellz * (y) + \
xdim1_initialise_chunk_kernel_cellz * ydim1_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_cellz * (y) + \
xdim2_initialise_chunk_kernel_cellz * ydim2_initialise_chunk_kernel_cellz * \
(z))
// user function
__device__
void
initialise_chunk_kernel_cellz_gpu(const double *vertexz, double *cellz,
double *celldz) {
double d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells;
cellz[OPS_ACC1(0, 0, 0)] =
0.5 * (vertexz[OPS_ACC0(0, 0, 0)] + vertexz[OPS_ACC0(0, 0, 1)]);
celldz[OPS_ACC2(0, 0, 0)] = d_z;
}
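// For example (hypothetical grid values): with grid.zmin = 0.0, grid.zmax = 10.0
// and grid.z_cells = 100, d_z = 0.1, and each cell centre in z is the average of
// the vertex below it and the vertex above it.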
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_cellz(const double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim0_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_cellz *
ydim0_initialise_chunk_kernel_cellz;
arg1 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim1_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim1_initialise_chunk_kernel_cellz *
ydim1_initialise_chunk_kernel_cellz;
arg2 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim2_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_cellz *
ydim2_initialise_chunk_kernel_cellz;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_cellz_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_cellz(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 54))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(54, "initialise_chunk_kernel_cellz");
OPS_kernels[54].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_cellz_h ||
ydim0 != ydim0_initialise_chunk_kernel_cellz_h ||
xdim1 != xdim1_initialise_chunk_kernel_cellz_h ||
ydim1 != ydim1_initialise_chunk_kernel_cellz_h ||
xdim2 != xdim2_initialise_chunk_kernel_cellz_h ||
ydim2 != ydim2_initialise_chunk_kernel_cellz_h) {
cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_cellz, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_cellz_h = xdim0;
cudaMemcpyToSymbol(ydim0_initialise_chunk_kernel_cellz, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_cellz_h = ydim0;
cudaMemcpyToSymbol(xdim1_initialise_chunk_kernel_cellz, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_cellz_h = xdim1;
cudaMemcpyToSymbol(ydim1_initialise_chunk_kernel_cellz, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_cellz_h = ydim1;
cudaMemcpyToSymbol(xdim2_initialise_chunk_kernel_cellz, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_cellz_h = xdim2;
cudaMemcpyToSymbol(ydim2_initialise_chunk_kernel_cellz, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_cellz_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_cellz<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[54].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
4280b989987d99b3598bada660bd8336cf7d88ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008, 2009 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
*/
// $Id: ConfForceGPU.cu martbert $
// Maintainer: martbert
#include "ConfForceCompute.cuh"
#include <assert.h>
/*! \file ConfForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic confinement forces. Used by ConfForceComputeGPU.
*/
//! Kernel for calculating confinement forces on the GPU
extern "C" __global__
void gpu_compute_confslit_forces_kernel(float4* d_force,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
Scalar k,
Scalar roff,
unsigned int dflag)
{
// start by identifying which particle we are to handle
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_group_members[group_idx];
// read in position, velocity, net force, and mass
float4 pos = d_pos[idx];
// initialize the force to 0
float4 force = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (dflag == 1)
{
// get pos (FLOPS: 3)
float x = pos.x;
// Calculate distance to center
float r = fabsf(x);
//Verify that the particle is outside the offset radius
if (r > roff)
{
float forcedivr = - k * (r - roff) / r;
force.x += forcedivr * x;
}
} else if (dflag == 2)
{
// get pos (FLOPS: 3)
float y = pos.y;
// Calculate distance to center
float r = fabsf(y);
//Verify that the particle is outside the offset radius
if (r > roff)
{
float forcedivr = - k * (r - roff) / r;
force.y += forcedivr * y;
}
} else if (dflag == 3)
{
// get pos (FLOPS: 3)
float z = pos.z;
// Calculate distance to center
float r = fabsf(z);
//Verify that the particle is outside the offset radius
if (r > roff)
{
float forcedivr = - k * (r - roff) / r;
force.z += forcedivr * z;
}
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes);
d_force[idx] = force;
}
/*! \param d_force Force data on GPU to write forces to
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
*/
hipError_t gpu_compute_confslit_forces(float4* d_force,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
Scalar k,
Scalar roff,
unsigned int dflag,
unsigned int block_size)
{
// check that block_size is valid
assert(block_size != 0);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
hipMemset(d_force, 0, sizeof(float4)*N);
hipLaunchKernelGGL(( gpu_compute_confslit_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force,d_group_members,group_size,N,d_pos,k,roff,dflag);
return hipSuccess;
}
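// A possible host-side call, sketched only; d_force, d_members and d_pos are
// assumed to be device buffers already allocated and filled by the caller, and
// the k/roff values are purely illustrative:
//
//   hipError_t err = gpu_compute_confslit_forces(d_force, d_members, group_size,
//                                                N, d_pos, Scalar(100.0),
//                                                Scalar(2.0), 1 /* confine in x */,
//                                                256);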
| 4280b989987d99b3598bada660bd8336cf7d88ba.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008, 2009 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
*/
// $Id: ConfForceGPU.cu martbert $
// Maintainer: martbert
#include "ConfForceCompute.cuh"
#include <assert.h>
/*! \file ConfForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic confinement forces. Used by ConfForceComputeGPU.
*/
//! Kernel for calculating confinement forces on the GPU
extern "C" __global__
void gpu_compute_confslit_forces_kernel(float4* d_force,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
Scalar k,
Scalar roff,
unsigned int dflag)
{
// start by identifying which particle we are to handle
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_group_members[group_idx];
// read in position, velocity, net force, and mass
float4 pos = d_pos[idx];
// initialize the force to 0
float4 force = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (dflag == 1)
{
// get pos (FLOPS: 3)
float x = pos.x;
// Calculate distance to center
float r = fabsf(x);
//Verify that the particle is outside the offset radius
if (r > roff)
{
float forcedivr = - k * (r - roff) / r;
force.x += forcedivr * x;
}
} else if (dflag == 2)
{
// get pos (FLOPS: 3)
float y = pos.y;
// Calculate distance to center
float r = fabsf(y);
//Verify that the particle is outside the offset radius
if (r > roff)
{
float forcedivr = - k * (r - roff) / r;
force.y += forcedivr * y;
}
} else if (dflag == 3)
{
// get pos (FLOPS: 3)
float z = pos.z;
// Calculate distance to center
float r = fabsf(z);
//Verify that the particle is outside the offset radius
if (r > roff)
{
float forcedivr = - k * (r - roff) / r;
force.z += forcedivr * z;
}
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes);
d_force[idx] = force;
}
/*! \param d_force Force data on GPU to write forces to
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
*/
cudaError_t gpu_compute_confslit_forces(float4* d_force,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
Scalar k,
Scalar roff,
unsigned int dflag,
unsigned int block_size)
{
// check that block_size is valid
assert(block_size != 0);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
cudaMemset(d_force, 0, sizeof(float4)*N);
gpu_compute_confslit_forces_kernel<<< grid, threads>>>(d_force,d_group_members,group_size,N,d_pos,k,roff,dflag);
return cudaSuccess;
}
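// A possible host-side call, sketched only; d_force, d_members and d_pos are
// assumed to be device buffers already allocated and filled by the caller, and
// the k/roff values are purely illustrative:
//
//   cudaError_t err = gpu_compute_confslit_forces(d_force, d_members, group_size,
//                                                 N, d_pos, Scalar(100.0),
//                                                 Scalar(2.0), 1 /* confine in x */,
//                                                 256);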
|
7d622c6eb9bfcd82f6bbac702e1b9158029c1360.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_constants.h>
#include <stdio.h>
#include "../../Gpus/Array.cuh"
#include "../GPUConstants.generated.cu"
#include "../GPUDataPoint.generated.cu"
#include "../GPUSplit.generated.cu"
#include "GPUCategoricalDataPoint.generated.cu"
#include "../Entropy.cuh"
__device__
uint CategoryBitMask(const int categoryId) {
return ((uint)1) << categoryId;
}
__device__
float IncrementalEntropy(
Array<float> classDistributionRight,
Array<float> additionalRight,
Array<float> totalClassDistribution,
const float totalLeft,
const float totalRight) {
float entropyLeft = 0.0f;
float entropyRight = 0.0f;
for (int classId = 0; classId < Constants_MaxClasses; ++classId) {
const float rightFrequency = classDistributionRight.at(classId) + additionalRight.at(classId);
const float leftFrequency = totalClassDistribution.at(classId) - rightFrequency;
entropyLeft += Entropy(totalLeft == 0 ? 0 : leftFrequency / totalLeft);
entropyRight += Entropy(totalRight == 0 ? 0 : rightFrequency / totalRight);
}
entropyLeft /= Constants_MaxClasses;
entropyRight /= Constants_MaxClasses;
const float total = totalLeft + totalRight;
const float entropy =
entropyLeft * (totalLeft / total) +
entropyRight * (totalRight / total);
return entropy;
}
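// In equation form, the value returned above is the weight-weighted blend of
// the two partitions (W_L = totalLeft, W_R = totalRight):
//
//   H_split = (W_L / (W_L + W_R)) * H_L + (W_R / (W_L + W_R)) * H_R
//
// where H_L and H_R average Entropy(frequency / W) over Constants_MaxClasses
// classes for the left and right side respectively.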
__kernel void spcCopyDataPointsPerAxis(const DecisionLearnerContext* context, CategoricalDataPoint* categoricalPointsBuffer) {
Array2D<CategoricalDataPoint> allCategoricalPoints(categoricalPointsBuffer, context->NumCategoricalAxes, context->NumDataPoints);
Array<int> dataPointIds(context->DataPointIds, context->NumDataPoints);
for (int openNodeIndex = blockIdx.x; openNodeIndex < context->NumOpenNodes; openNodeIndex += gridDim.x) {
const int nodeId = context->OpenNodeIds[openNodeIndex];
if (nodeId == -1) {
continue;
}
const Node* node = &context->Nodes[nodeId];
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
const int dataPointId = dataPointIds.at(node->RangeStart + col);
const DataPoint* dataPoint = &context->DataPoints[dataPointId];
for (int axisId = 0; axisId < context->NumCategoricalAxes; ++axisId) {
CategoricalDataPoint* __restrict__ categoricalPoint = &allCategoricalPoints.at(axisId, node->RangeStart + col);
categoricalPoint->DataPointId = dataPointId;
categoricalPoint->Weight = dataPoint->Weight;
categoricalPoint->Class = dataPoint->Class;
categoricalPoint->Categories = dataPoint->AllCategories[axisId];
}
}
}
}
__device__ void spcCalculateBestSplitForNodeAxis(
const DecisionLearnerContext* context,
const Node* node,
const int axisId,
const Array2D<CategoricalDataPoint> allCategoricalPoints,
Split* __restrict__ split) {
const float Precision = 0.001f;
__shared__ float incrementalFrequenciesBuffer[Constants_MaxCategories * Constants_MaxClasses];
__shared__ float incrementalEntropyPerCategoryBuffer[Constants_MaxCategories];
__shared__ float bestIncrementalEntropy;
__shared__ float bestIncrementalCategory;
__shared__ float classDistributionRightBuffer[Constants_MaxClasses];
__shared__ uint categoriesRight;
__shared__ float bestOverallEntropy;
__shared__ uint bestOverallCategories;
Array2D<float> incrementalFrequenciesPerCategory(incrementalFrequenciesBuffer, Constants_MaxCategories, Constants_MaxClasses);
Array<float> incrementalEntropyPerCategory(incrementalEntropyPerCategoryBuffer, Constants_MaxCategories);
Array<float> classDistributionRight(classDistributionRightBuffer, Constants_MaxClasses);
const Array<float> totalClassDistribution(node->ClassDistribution, Constants_MaxClasses);
// Zero frequencies
if (threadIdx.x == 0) {
categoriesRight = 0;
bestOverallEntropy = node->Entropy;
bestOverallCategories = 0;
}
for (int classId = threadIdx.x; classId < Constants_MaxClasses; classId += blockDim.x) {
classDistributionRight.at(classId) = 0.0f;
}
__syncthreads();
for (int addCategoryIteration = 0; addCategoryIteration < Constants_MaxCategories; ++addCategoryIteration) {
// Initialize additional frequencies with zeroes
for (int categoryId = threadIdx.x; categoryId < Constants_MaxCategories; categoryId += blockDim.x) {
incrementalEntropyPerCategory.at(categoryId) = CUDART_INF_F;
for (int classId = 0; classId < Constants_MaxClasses; ++classId) {
incrementalFrequenciesPerCategory.at(categoryId, classId) = 0.0f;
}
}
__syncthreads();
// For each category, what would the effect be on the weight distribution if this category was moved to the right?
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
const CategoricalDataPoint* categoricalPoint = &allCategoricalPoints.at(axisId, node->RangeStart + col);
if ((categoricalPoint->Categories & categoriesRight) != 0) {
// This data point is already on the right, can't add it again
continue;
}
for (int categoryId = 0; categoryId < Constants_MaxCategories; ++categoryId) {
uint categoryBitMask = CategoryBitMask(categoryId);
if ((categoryBitMask & categoriesRight) != 0) {
// This category is already on the right, can't add it again
continue;
}
if ((categoricalPoint->Categories & categoryBitMask) != 0) {
float* __restrict__ frequency = &incrementalFrequenciesPerCategory.at(categoryId, categoricalPoint->Class);
atomicAdd(frequency, categoricalPoint->Weight);
}
}
}
__syncthreads();
// For each category, if it was added, what would the new entropy be?
for (int categoryId = threadIdx.x; categoryId < Constants_MaxCategories; categoryId += blockDim.x) {
uint categoryBitMask = CategoryBitMask(categoryId);
if ((categoryBitMask & categoriesRight) != 0) { continue; }
Array<float> additionalRight = incrementalFrequenciesPerCategory.slice(categoryId);
float total = 0.0f;
float totalRight = 0.0f;
float totalAddition = 0.0f;
for (int classId = 0; classId < Constants_MaxClasses; ++classId) {
total += totalClassDistribution.at(classId);
totalRight += classDistributionRight.at(classId) + additionalRight.at(classId);
totalAddition += additionalRight.at(classId);
}
const float totalLeft = total - totalRight;
if (totalAddition <= Precision || totalLeft <= Precision || totalRight <= Precision) {
// This split achieves nothing
continue;
} else {
float* __restrict__ incrementalEntropy = &incrementalEntropyPerCategory.at(categoryId);
*incrementalEntropy = IncrementalEntropy(classDistributionRight, additionalRight, totalClassDistribution, totalLeft, totalRight);
assert(*incrementalEntropy <= node->Entropy + Precision);
}
}
__syncthreads();
// Which additional category has the best entropy?
if (threadIdx.x == 0) {
bestIncrementalEntropy = CUDART_INF_F;
bestIncrementalCategory = -1;
for (int categoryId = 0; categoryId < Constants_MaxCategories; ++categoryId) {
uint categoryBitMask = CategoryBitMask(categoryId);
if ((categoryBitMask & categoriesRight) != 0) { continue; }
const float entropy = incrementalEntropyPerCategory.at(categoryId);
if (entropy < bestIncrementalEntropy) {
bestIncrementalEntropy = entropy;
bestIncrementalCategory = categoryId;
}
}
}
__syncthreads();
// Add new incremental category
if (bestIncrementalCategory != -1) {
const Array<float> additionalRight = incrementalFrequenciesPerCategory.slice(bestIncrementalCategory);
for (int classId = threadIdx.x; classId < Constants_MaxClasses; classId += blockDim.x) {
float* __restrict__ rightFrequency = &classDistributionRight.at(classId);
*rightFrequency += additionalRight.at(classId);
}
if (threadIdx.x == 0) {
categoriesRight |= CategoryBitMask(bestIncrementalCategory);
if (bestIncrementalEntropy < bestOverallEntropy) {
bestOverallEntropy = bestIncrementalEntropy;
bestOverallCategories = categoriesRight;
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
assert(bestOverallEntropy <= node->Entropy);
if (bestOverallCategories != 0) {
split->Entropy = bestOverallEntropy;
split->SplitType = Constants_SplitType_Categorical;
split->Axis = axisId;
split->Column = -1;
split->SplitCategories = bestOverallCategories;
split->SplitAttribute = 999;
} else {
split->Entropy = 999;
split->SplitType = Constants_SplitType_Null;
split->Axis = 0xFF;
split->Column = -1;
split->SplitCategories = 999;
split->SplitAttribute = 999;
}
}
}
__kernel void spcBestCategoricalSplitPerAxis(
const DecisionLearnerContext* context,
CategoricalDataPoint* categoricalPointsBuffer,
Split* splits,
int splitWriteStart) {
Array2D<CategoricalDataPoint> allCategoricalPoints(categoricalPointsBuffer, context->NumCategoricalAxes, context->NumDataPoints);
Array2D<Split> allSplits(splits, context->MaxOpenNodes, Constants_MaxSplits);
const int numBlocks = context->NumCategoricalAxes * context->NumOpenNodes;
for (int i = blockIdx.x; i < numBlocks; i += gridDim.x) {
const int openNodeIndex = i / context->NumCategoricalAxes;
const int axisId = i % context->NumCategoricalAxes;
const int nodeId = context->OpenNodeIds[openNodeIndex];
if (nodeId == -1) {
continue;
}
const Node* node = &context->Nodes[nodeId];
Split* __restrict__ split = &allSplits.at(openNodeIndex, splitWriteStart + axisId);
spcCalculateBestSplitForNodeAxis(context, node, axisId, allCategoricalPoints, split);
}
}
__device__
void spcApplyOptimalSplitToNode(
DecisionLearnerContext* context,
Array2D<CategoricalDataPoint> allCategoricalPoints,
const Split* bestSplit,
const int nodeId,
Node* __restrict__ node,
uint8_t* allSortKeys) {
__shared__ int splitColumn;
Array<int> allDataPointIds(context->DataPointIds, context->NumDataPoints);
const int axisId = bestSplit->Axis;
// Split data point IDs into left and right
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
const CategoricalDataPoint* categoricalPoint = &allCategoricalPoints.at(axisId, node->RangeStart + col);
allSortKeys[node->RangeStart + col] = (categoricalPoint->Categories & bestSplit->SplitCategories) ? 0 : 1; // This order is used because the sort is descending and the sort key is an unsigned byte
}
__syncthreads();
sortBitonic(&allSortKeys[node->RangeStart], &allDataPointIds.at(node->RangeStart), node->RangeLength);
// Find where the split point is
if (threadIdx.x == 0) {
splitColumn = -1;
}
__syncthreads();
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
if (col == 0) {
continue;
} else if (allSortKeys[node->RangeStart + col - 1] != allSortKeys[node->RangeStart + col]) {
splitColumn = col;
}
}
__syncthreads();
if (threadIdx.x == 0 && splitColumn != -1) {
// Update parent split fields
node->SplitType = Constants_SplitType_Categorical;
node->SplitAttribute = 999;
node->SplitCategories = bestSplit->SplitCategories;
node->SplitAxis = bestSplit->Axis;
node->LeftChild = nodeId * 2 + 1;
node->RightChild = nodeId * 2 + 2;
// Write new child nodes
Node* __restrict__ leftNode = &context->Nodes[node->LeftChild];
Node* __restrict__ rightNode = &context->Nodes[node->RightChild];
leftNode->RangeStart = node->RangeStart;
leftNode->RangeLength = splitColumn;
rightNode->RangeStart = node->RangeStart + splitColumn;
rightNode->RangeLength = node->RangeLength - splitColumn;
// Add to next open list
const int leftOpenIndex = atomicAdd(&context->NumNextOpenNodes, 2);
const int rightOpenIndex = leftOpenIndex + 1;
int* __restrict__ nextOpenNodes = context->NextOpenNodeIds;
nextOpenNodes[leftOpenIndex] = node->LeftChild;
nextOpenNodes[rightOpenIndex] = node->RightChild;
}
}
__kernel void spcApplyOptimalSplit(
DecisionLearnerContext* context,
const CategoricalDataPoint* categoricalPointsBuffer,
const Split* bestSplitsBuffer,
uint8_t* sortKeys) {
Array2D<CategoricalDataPoint> allCategoricalPoints(categoricalPointsBuffer, context->NumCategoricalAxes, context->NumDataPoints);
const Array<Split> bestSplits(bestSplitsBuffer, context->MaxOpenNodes);
for (int openNodeIndex = blockIdx.x; openNodeIndex < context->NumOpenNodes; openNodeIndex += gridDim.x) {
const int nodeId = context->OpenNodeIds[openNodeIndex];
if (nodeId == -1) {
continue;
}
Node* __restrict__ node = &context->Nodes[nodeId];
const Split* bestSplit = &bestSplits.at(openNodeIndex);
if (bestSplit->SplitType != Constants_SplitType_Categorical) {
continue;
}
spcApplyOptimalSplitToNode(context, allCategoricalPoints, bestSplit, nodeId, node, sortKeys);
}
} | 7d622c6eb9bfcd82f6bbac702e1b9158029c1360.cu | #include <math_constants.h>
#include <stdio.h>
#include "../../Gpus/Array.cuh"
#include "../GPUConstants.generated.cu"
#include "../GPUDataPoint.generated.cu"
#include "../GPUSplit.generated.cu"
#include "GPUCategoricalDataPoint.generated.cu"
#include "../Entropy.cuh"
__device__
uint CategoryBitMask(const int categoryId) {
return ((uint)1) << categoryId;
}
__device__
float IncrementalEntropy(
Array<float> classDistributionRight,
Array<float> additionalRight,
Array<float> totalClassDistribution,
const float totalLeft,
const float totalRight) {
float entropyLeft = 0.0f;
float entropyRight = 0.0f;
for (int classId = 0; classId < Constants_MaxClasses; ++classId) {
const float rightFrequency = classDistributionRight.at(classId) + additionalRight.at(classId);
const float leftFrequency = totalClassDistribution.at(classId) - rightFrequency;
entropyLeft += Entropy(totalLeft == 0 ? 0 : leftFrequency / totalLeft);
entropyRight += Entropy(totalRight == 0 ? 0 : rightFrequency / totalRight);
}
entropyLeft /= Constants_MaxClasses;
entropyRight /= Constants_MaxClasses;
const float total = totalLeft + totalRight;
const float entropy =
entropyLeft * (totalLeft / total) +
entropyRight * (totalRight / total);
return entropy;
}
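// In equation form, the value returned above is the weight-weighted blend of
// the two partitions (W_L = totalLeft, W_R = totalRight):
//
//   H_split = (W_L / (W_L + W_R)) * H_L + (W_R / (W_L + W_R)) * H_R
//
// where H_L and H_R average Entropy(frequency / W) over Constants_MaxClasses
// classes for the left and right side respectively.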
__kernel void spcCopyDataPointsPerAxis(const DecisionLearnerContext* context, CategoricalDataPoint* categoricalPointsBuffer) {
Array2D<CategoricalDataPoint> allCategoricalPoints(categoricalPointsBuffer, context->NumCategoricalAxes, context->NumDataPoints);
Array<int> dataPointIds(context->DataPointIds, context->NumDataPoints);
for (int openNodeIndex = blockIdx.x; openNodeIndex < context->NumOpenNodes; openNodeIndex += gridDim.x) {
const int nodeId = context->OpenNodeIds[openNodeIndex];
if (nodeId == -1) {
continue;
}
const Node* node = &context->Nodes[nodeId];
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
const int dataPointId = dataPointIds.at(node->RangeStart + col);
const DataPoint* dataPoint = &context->DataPoints[dataPointId];
for (int axisId = 0; axisId < context->NumCategoricalAxes; ++axisId) {
CategoricalDataPoint* __restrict__ categoricalPoint = &allCategoricalPoints.at(axisId, node->RangeStart + col);
categoricalPoint->DataPointId = dataPointId;
categoricalPoint->Weight = dataPoint->Weight;
categoricalPoint->Class = dataPoint->Class;
categoricalPoint->Categories = dataPoint->AllCategories[axisId];
}
}
}
}
__device__ void spcCalculateBestSplitForNodeAxis(
const DecisionLearnerContext* context,
const Node* node,
const int axisId,
const Array2D<CategoricalDataPoint> allCategoricalPoints,
Split* __restrict__ split) {
const float Precision = 0.001f;
__shared__ float incrementalFrequenciesBuffer[Constants_MaxCategories * Constants_MaxClasses];
__shared__ float incrementalEntropyPerCategoryBuffer[Constants_MaxCategories];
__shared__ float bestIncrementalEntropy;
__shared__ float bestIncrementalCategory;
__shared__ float classDistributionRightBuffer[Constants_MaxClasses];
__shared__ uint categoriesRight;
__shared__ float bestOverallEntropy;
__shared__ uint bestOverallCategories;
Array2D<float> incrementalFrequenciesPerCategory(incrementalFrequenciesBuffer, Constants_MaxCategories, Constants_MaxClasses);
Array<float> incrementalEntropyPerCategory(incrementalEntropyPerCategoryBuffer, Constants_MaxCategories);
Array<float> classDistributionRight(classDistributionRightBuffer, Constants_MaxClasses);
const Array<float> totalClassDistribution(node->ClassDistribution, Constants_MaxClasses);
// Zero frequencies
if (threadIdx.x == 0) {
categoriesRight = 0;
bestOverallEntropy = node->Entropy;
bestOverallCategories = 0;
}
for (int classId = threadIdx.x; classId < Constants_MaxClasses; classId += blockDim.x) {
classDistributionRight.at(classId) = 0.0f;
}
__syncthreads();
for (int addCategoryIteration = 0; addCategoryIteration < Constants_MaxCategories; ++addCategoryIteration) {
// Initialize additional frequencies with zeroes
for (int categoryId = threadIdx.x; categoryId < Constants_MaxCategories; categoryId += blockDim.x) {
incrementalEntropyPerCategory.at(categoryId) = CUDART_INF_F;
for (int classId = 0; classId < Constants_MaxClasses; ++classId) {
incrementalFrequenciesPerCategory.at(categoryId, classId) = 0.0f;
}
}
__syncthreads();
// For each category, what would the effect be on the weight distribution if this category was moved to the right?
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
const CategoricalDataPoint* categoricalPoint = &allCategoricalPoints.at(axisId, node->RangeStart + col);
if ((categoricalPoint->Categories & categoriesRight) != 0) {
// This data point is already on the right, can't add it again
continue;
}
for (int categoryId = 0; categoryId < Constants_MaxCategories; ++categoryId) {
uint categoryBitMask = CategoryBitMask(categoryId);
if ((categoryBitMask & categoriesRight) != 0) {
// This category is already on the right, can't add it again
continue;
}
if ((categoricalPoint->Categories & categoryBitMask) != 0) {
float* __restrict__ frequency = &incrementalFrequenciesPerCategory.at(categoryId, categoricalPoint->Class);
atomicAdd(frequency, categoricalPoint->Weight);
}
}
}
__syncthreads();
// For each category, if it was added, what would the new entropy be?
for (int categoryId = threadIdx.x; categoryId < Constants_MaxCategories; categoryId += blockDim.x) {
uint categoryBitMask = CategoryBitMask(categoryId);
if ((categoryBitMask & categoriesRight) != 0) { continue; }
Array<float> additionalRight = incrementalFrequenciesPerCategory.slice(categoryId);
float total = 0.0f;
float totalRight = 0.0f;
float totalAddition = 0.0f;
for (int classId = 0; classId < Constants_MaxClasses; ++classId) {
total += totalClassDistribution.at(classId);
totalRight += classDistributionRight.at(classId) + additionalRight.at(classId);
totalAddition += additionalRight.at(classId);
}
const float totalLeft = total - totalRight;
if (totalAddition <= Precision || totalLeft <= Precision || totalRight <= Precision) {
// This split achieves nothing
continue;
} else {
float* __restrict__ incrementalEntropy = &incrementalEntropyPerCategory.at(categoryId);
*incrementalEntropy = IncrementalEntropy(classDistributionRight, additionalRight, totalClassDistribution, totalLeft, totalRight);
assert(*incrementalEntropy <= node->Entropy + Precision);
}
}
__syncthreads();
// Which additional category has the best entropy?
if (threadIdx.x == 0) {
bestIncrementalEntropy = CUDART_INF_F;
bestIncrementalCategory = -1;
for (int categoryId = 0; categoryId < Constants_MaxCategories; ++categoryId) {
uint categoryBitMask = CategoryBitMask(categoryId);
if ((categoryBitMask & categoriesRight) != 0) { continue; }
const float entropy = incrementalEntropyPerCategory.at(categoryId);
if (entropy < bestIncrementalEntropy) {
bestIncrementalEntropy = entropy;
bestIncrementalCategory = categoryId;
}
}
}
__syncthreads();
// Add new incremental category
if (bestIncrementalCategory != -1) {
const Array<float> additionalRight = incrementalFrequenciesPerCategory.slice(bestIncrementalCategory);
for (int classId = threadIdx.x; classId < Constants_MaxClasses; classId += blockDim.x) {
float* __restrict__ rightFrequency = &classDistributionRight.at(classId);
*rightFrequency += additionalRight.at(classId);
}
if (threadIdx.x == 0) {
categoriesRight |= CategoryBitMask(bestIncrementalCategory);
if (bestIncrementalEntropy < bestOverallEntropy) {
bestOverallEntropy = bestIncrementalEntropy;
bestOverallCategories = categoriesRight;
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
assert(bestOverallEntropy <= node->Entropy);
if (bestOverallCategories != 0) {
split->Entropy = bestOverallEntropy;
split->SplitType = Constants_SplitType_Categorical;
split->Axis = axisId;
split->Column = -1;
split->SplitCategories = bestOverallCategories;
split->SplitAttribute = 999;
} else {
split->Entropy = 999;
split->SplitType = Constants_SplitType_Null;
split->Axis = 0xFF;
split->Column = -1;
split->SplitCategories = 999;
split->SplitAttribute = 999;
}
}
}
__kernel void spcBestCategoricalSplitPerAxis(
const DecisionLearnerContext* context,
CategoricalDataPoint* categoricalPointsBuffer,
Split* splits,
int splitWriteStart) {
Array2D<CategoricalDataPoint> allCategoricalPoints(categoricalPointsBuffer, context->NumCategoricalAxes, context->NumDataPoints);
Array2D<Split> allSplits(splits, context->MaxOpenNodes, Constants_MaxSplits);
const int numBlocks = context->NumCategoricalAxes * context->NumOpenNodes;
for (int i = blockIdx.x; i < numBlocks; i += gridDim.x) {
const int openNodeIndex = i / context->NumCategoricalAxes;
const int axisId = i % context->NumCategoricalAxes;
const int nodeId = context->OpenNodeIds[openNodeIndex];
if (nodeId == -1) {
continue;
}
const Node* node = &context->Nodes[nodeId];
Split* __restrict__ split = &allSplits.at(openNodeIndex, splitWriteStart + axisId);
spcCalculateBestSplitForNodeAxis(context, node, axisId, allCategoricalPoints, split);
}
}
__device__
void spcApplyOptimalSplitToNode(
DecisionLearnerContext* context,
Array2D<CategoricalDataPoint> allCategoricalPoints,
const Split* bestSplit,
const int nodeId,
Node* __restrict__ node,
uint8_t* allSortKeys) {
__shared__ int splitColumn;
Array<int> allDataPointIds(context->DataPointIds, context->NumDataPoints);
const int axisId = bestSplit->Axis;
// Split data point IDs into left and right
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
const CategoricalDataPoint* categoricalPoint = &allCategoricalPoints.at(axisId, node->RangeStart + col);
allSortKeys[node->RangeStart + col] = (categoricalPoint->Categories & bestSplit->SplitCategories) ? 0 : 1; // This order is used because the sort is descending and the sort key is an unsigned byte
}
__syncthreads();
sortBitonic(&allSortKeys[node->RangeStart], &allDataPointIds.at(node->RangeStart), node->RangeLength);
// Find where the split point is
if (threadIdx.x == 0) {
splitColumn = -1;
}
__syncthreads();
for (int col = threadIdx.x; col < node->RangeLength; col += blockDim.x) {
if (col == 0) {
continue;
} else if (allSortKeys[node->RangeStart + col - 1] != allSortKeys[node->RangeStart + col]) {
splitColumn = col;
}
}
__syncthreads();
if (threadIdx.x == 0 && splitColumn != -1) {
// Update parent split fields
node->SplitType = Constants_SplitType_Categorical;
node->SplitAttribute = 999;
node->SplitCategories = bestSplit->SplitCategories;
node->SplitAxis = bestSplit->Axis;
node->LeftChild = nodeId * 2 + 1;
node->RightChild = nodeId * 2 + 2;
// Write new child nodes
Node* __restrict__ leftNode = &context->Nodes[node->LeftChild];
Node* __restrict__ rightNode = &context->Nodes[node->RightChild];
leftNode->RangeStart = node->RangeStart;
leftNode->RangeLength = splitColumn;
rightNode->RangeStart = node->RangeStart + splitColumn;
rightNode->RangeLength = node->RangeLength - splitColumn;
// Add to next open list
const int leftOpenIndex = atomicAdd(&context->NumNextOpenNodes, 2);
const int rightOpenIndex = leftOpenIndex + 1;
int* __restrict__ nextOpenNodes = context->NextOpenNodeIds;
nextOpenNodes[leftOpenIndex] = node->LeftChild;
nextOpenNodes[rightOpenIndex] = node->RightChild;
}
}
__kernel void spcApplyOptimalSplit(
DecisionLearnerContext* context,
const CategoricalDataPoint* categoricalPointsBuffer,
const Split* bestSplitsBuffer,
uint8_t* sortKeys) {
Array2D<CategoricalDataPoint> allCategoricalPoints(categoricalPointsBuffer, context->NumCategoricalAxes, context->NumDataPoints);
const Array<Split> bestSplits(bestSplitsBuffer, context->MaxOpenNodes);
for (int openNodeIndex = blockIdx.x; openNodeIndex < context->NumOpenNodes; openNodeIndex += gridDim.x) {
const int nodeId = context->OpenNodeIds[openNodeIndex];
if (nodeId == -1) {
continue;
}
Node* __restrict__ node = &context->Nodes[nodeId];
const Split* bestSplit = &bestSplits.at(openNodeIndex);
if (bestSplit->SplitType != Constants_SplitType_Categorical) {
continue;
}
spcApplyOptimalSplitToNode(context, allCategoricalPoints, bestSplit, nodeId, node, sortKeys);
}
} |
09a62780916534cee340720bd5e0df1c62013e51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/sample_prob.h"
#include "paddle/fluid/operators/math/softmax.h"
#include "paddle/fluid/operators/sample_logits_op.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
// UNDERSTAND: something like take_along_axis in numpy.
template <typename T>
__global__ void GPUTakeAlongD1(size_t size, const int batch_size,
const int array_slice_size,
const int idx_slice_size, const T* p_array,
const int64_t* p_index, T* p_value) {
const auto value_slice_size = idx_slice_size;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
for (; idx < size; idx += step_size) {
int i = idx / idx_slice_size;
auto array_index = p_index[idx];
p_value[idx] = p_array[i * array_slice_size + array_index];
}
}
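// A small worked example of the gather above, with batch_size = 2,
// array_slice_size = 4 and idx_slice_size = 2 (illustrative values only):
//   p_array = [a0 a1 a2 a3 | b0 b1 b2 b3]
//   p_index = [ 1  3 |  0  2]
//   p_value = [a1 a3 | b0 b2]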
// UNDERSTAND: something like put_along_axis in numpy, but if there are duplicate
// indices, the scatter is done in a += (accumulating) way.
template <typename T>
__global__ void GPUPutAlongD1(size_t size, const int batch_size,
const int array_slice_size,
const int idx_slice_size, T* p_array,
const int64_t* p_index, const T* p_value) {
const auto value_slice_size = idx_slice_size;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
// size == batch_size
for (; idx < size; idx += step_size) {
int i = idx;
for (int j = 0; j < idx_slice_size; ++j) {
auto array_index = p_index[i * idx_slice_size + j];
p_array[i * array_slice_size + array_index] +=
p_value[i * idx_slice_size + j];
}
}
}
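// A small worked example of the scatter-add above, with batch_size = 2,
// array_slice_size = 4, idx_slice_size = 2 and p_array starting at zero
// (illustrative values only):
//   p_index = [ 1  1 |  0  2]
//   p_value = [v0 v1 | v2 v3]
//   p_array = [0 v0+v1 0 0 | v2 0 v3 0]   (the duplicate index 1 accumulates)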
// UNDERSTAND: set label as 0,1,...,num_true-1
template <typename T>
__global__ void GPUSetLabel(size_t size, const int num_true, int64_t* p_array) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
for (; idx < size; idx += step_size) {
p_array[idx] = idx % num_true;
}
}
// UNDERSTAND: compute accidental hits from samples and subtract a large float
// constant (here 1e20) from the corresponding logits
template <typename T>
__global__ void gpu_compute_remove_accidental_hits(const int size,
const int num_true,
const int idx_slice_size,
const int64_t* p_index,
T* p_value) {
const auto value_slice_size = idx_slice_size;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
for (; idx < size; idx += step_size) {
int i = idx / idx_slice_size;
if (idx % idx_slice_size < num_true) continue;
for (int j = 0; j < num_true; ++j) {
const auto true_idx = i * idx_slice_size + j;
if (p_index[true_idx] == p_index[idx]) {
p_value[idx] -= 1e20;
break;
}
}
}
}
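// For example (num_true = 1): a row of p_index holds [label, s0, s1, ...];
// any sampled class s_k equal to the true label has 1e20 subtracted from its
// logit, so it is effectively excluded from the softmax computed downstream.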
template <typename T>
class SampleLogitsCUDAKernel : public framework::OpKernel<T> {
public:
using Tensor = framework::Tensor;
void Compute(const framework::ExecutionContext& context) const override {
// get necessary inputs
const Tensor* logits = context.Input<Tensor>("Logits");
const Tensor* labels = context.Input<Tensor>("Labels");
VLOG(3) << "Enter SampleLogitsCUDAKernel";
// get necessary outputs
Tensor* samples = context.Output<Tensor>("Samples");
Tensor* probabilities = context.Output<Tensor>("Probabilities");
Tensor* sampled_logits = context.Output<Tensor>("SampledLogits");
Tensor* sampled_labels = context.Output<Tensor>("SampledLabels");
// shapes
const auto batch_size = logits->dims()[0];
const auto num_classes = logits->dims()[1];
const auto labels_dim = labels->dims();
const auto num_true = labels_dim[1];
const auto samples_dim = samples->dims();
// attrs
const auto num_samples = context.Attr<int>("num_samples");
const bool use_customized_samples =
context.Attr<bool>("use_customized_samples");
const bool uniq = context.Attr<bool>("uniq");
const bool remove_accidental_hits =
context.Attr<bool>("remove_accidental_hits");
// device contexts
auto& dev_ctx = context.cuda_device_context();
// UNDERSTAND: allocate memories for temporaries
sampled_logits->mutable_data<T>(samples_dim, context.GetPlace());
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, sampled_logits, static_cast<T>(0));
auto sampled_labels_data =
sampled_labels->mutable_data<int64_t>(labels_dim, context.GetPlace());
int threads = 512;
size_t size = batch_size * num_true;
int grid = (size + threads - 1) / threads;
hipLaunchKernelGGL(( GPUSetLabel<T>)
, dim3(grid), dim3(threads), 0, context.cuda_device_context().stream(),
size, num_true, sampled_labels_data);
if (use_customized_samples) {
const Tensor* customized_samples =
context.Input<Tensor>("CustomizedSamples");
const Tensor* customized_probabilities =
context.Input<Tensor>("CustomizedProbabilities");
PADDLE_ENFORCE_EQ(customized_samples, samples,
platform::errors::InvalidArgument(
"CustomizedSamples must be the same Tensor with "
"Samples when use_customized_samples = True"));
PADDLE_ENFORCE_EQ(
customized_probabilities, probabilities,
platform::errors::InvalidArgument(
"CustomizedProbabilities must be the same Tensor with "
"Probabilities when use_customized_samples = True"));
} else {
samples->mutable_data<int64_t>(context.GetPlace());
probabilities->mutable_data<T>(samples_dim, context.GetPlace());
// UNDERSTAND: sampling
const auto seed = context.Attr<int>("seed");
auto sampler_with_prob = math::GPUSampleWithProb<T>();
sampler_with_prob(context.cuda_device_context(), seed, num_classes, uniq,
num_samples, labels, samples, probabilities);
}
// UNDERSTAND: gather sampled logits and remove accidental hits if needed
const auto num_take = samples->dims()[1];
const auto array_dims = logits->dims();
const auto idx_dims = samples->dims();
const T* p_array = logits->data<T>();
const int64_t* p_index = samples->data<int64_t>();
T* p_value = sampled_logits->data<T>();
// src slice size
const auto array_slice_size = array_dims[1];
// index slice size
const auto idx_slice_size = idx_dims[1];
size = batch_size * num_take;
grid = (size + threads - 1) / threads;
hipLaunchKernelGGL(( GPUTakeAlongD1<T>)
, dim3(grid), dim3(threads), 0, context.cuda_device_context().stream(),
size, batch_size, array_slice_size, idx_slice_size, p_array,
p_index, p_value);
if (remove_accidental_hits) {
const size_t size = batch_size * (num_true + num_samples);
int grid = (size + threads - 1) / threads;
hipLaunchKernelGGL(( gpu_compute_remove_accidental_hits<T>)
, dim3(grid), dim3(threads), 0, context.cuda_device_context().stream(),
size, num_true, idx_slice_size, p_index, p_value);
}
// subtracted sampled logits with logQ(y|x)
auto probs = EigenMatrix<T>::From(*probabilities);
auto smp_logits = EigenMatrix<T>::From(*sampled_logits);
smp_logits.device(*dev_ctx.eigen_device()) =
(smp_logits - probs.log().unaryExpr(TolerableValue<T>()))
.unaryExpr(TolerableValue<T>());
}
};
template <typename T>
class SampleLogitsGradCUDAKernel : public framework::OpKernel<T> {
public:
using Tensor = framework::Tensor;
void Compute(const framework::ExecutionContext& context) const override {
auto logits_grad = context.Output<Tensor>(framework::GradVarName("Logits"));
const Tensor* samples = context.Input<Tensor>("Samples");
const Tensor* sampled_logits_grad =
context.Input<Tensor>(framework::GradVarName("SampledLogits"));
logits_grad->mutable_data<T>(context.GetPlace());
auto& dev_ctx = context.cuda_device_context();
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, logits_grad, static_cast<T>(0));
// UNDERSTAND: scatter it back to logit_grad
const auto batch_size = samples->dims()[0];
const auto num_put = samples->dims()[1];
const auto array_dims = logits_grad->dims();
const auto idx_dims = samples->dims();
T* p_array = logits_grad->data<T>();
const int64_t* p_index = samples->data<int64_t>();
const T* p_value = sampled_logits_grad->data<T>();
// src slice size
const auto array_slice_size = array_dims[1];
// index slice size
const auto idx_slice_size = idx_dims[1];
int threads = 128;
const size_t size = batch_size;
int grid = (size + threads - 1) / threads;
hipLaunchKernelGGL(( GPUPutAlongD1<T>)
, dim3(grid), dim3(threads), 0, context.cuda_device_context().stream(),
size, batch_size, array_slice_size, idx_slice_size, p_array,
p_index, p_value);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sample_logits, ops::SampleLogitsCUDAKernel<float>,
ops::SampleLogitsCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(sample_logits_grad,
ops::SampleLogitsGradCUDAKernel<float>,
ops::SampleLogitsGradCUDAKernel<double>);
| 09a62780916534cee340720bd5e0df1c62013e51.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/sample_prob.h"
#include "paddle/fluid/operators/math/softmax.h"
#include "paddle/fluid/operators/sample_logits_op.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
// UNDERSTAND: something like take_along_axis in numpy.
template <typename T>
__global__ void GPUTakeAlongD1(size_t size, const int batch_size,
const int array_slice_size,
const int idx_slice_size, const T* p_array,
const int64_t* p_index, T* p_value) {
const auto value_slice_size = idx_slice_size;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
for (; idx < size; idx += step_size) {
int i = idx / idx_slice_size;
auto array_index = p_index[idx];
p_value[idx] = p_array[i * array_slice_size + array_index];
}
}
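// A minimal host-side sketch of the same gather (the helper below is
// hypothetical and only illustrative), assuming the row-major
// [batch, slice] layouts the kernel indexes into:
template <typename T>
static void TakeAlongD1Reference(const T* p_array, const int64_t* p_index,
                                 T* p_value, int batch_size,
                                 int array_slice_size, int idx_slice_size) {
  for (int i = 0; i < batch_size; ++i) {
    for (int j = 0; j < idx_slice_size; ++j) {
      // Row i of the output gathers from row i of the input at the column
      // selected by p_index, matching the kernel's indexing above.
      p_value[i * idx_slice_size + j] =
          p_array[i * array_slice_size + p_index[i * idx_slice_size + j]];
    }
  }
}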
// UNDERSTAND: something like put_along_axis in numpy, but if there are
// duplicate indices, the scatter is done in a += way.
template <typename T>
__global__ void GPUPutAlongD1(size_t size, const int batch_size,
const int array_slice_size,
const int idx_slice_size, T* p_array,
const int64_t* p_index, const T* p_value) {
const auto value_slice_size = idx_slice_size;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
// size == batch_size
for (; idx < size; idx += step_size) {
int i = idx;
for (int j = 0; j < idx_slice_size; ++j) {
auto array_index = p_index[i * idx_slice_size + j];
p_array[i * array_slice_size + array_index] +=
p_value[i * idx_slice_size + j];
}
}
}
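// A minimal host-side sketch of the same scatter-add (the helper below is
// hypothetical and only illustrative); duplicate indices within a row
// accumulate, matching the "+=" behaviour noted above:
template <typename T>
static void PutAlongD1Reference(T* p_array, const int64_t* p_index,
                                const T* p_value, int batch_size,
                                int array_slice_size, int idx_slice_size) {
  for (int i = 0; i < batch_size; ++i) {
    for (int j = 0; j < idx_slice_size; ++j) {
      p_array[i * array_slice_size + p_index[i * idx_slice_size + j]] +=
          p_value[i * idx_slice_size + j];
    }
  }
}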
// UNDERSTAND: set label as 0,1,...,num_true-1
template <typename T>
__global__ void GPUSetLabel(size_t size, const int num_true, int64_t* p_array) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
for (; idx < size; idx += step_size) {
p_array[idx] = idx % num_true;
}
}
// UNDERSTAND: compute accidental hits from samples and subtract a large
// float, here 1e20, from the corresponding logits
template <typename T>
__global__ void gpu_compute_remove_accidental_hits(const int size,
const int num_true,
const int idx_slice_size,
const int64_t* p_index,
T* p_value) {
const auto value_slice_size = idx_slice_size;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = blockDim.x * gridDim.x;
for (; idx < size; idx += step_size) {
int i = idx / idx_slice_size;
if (idx % idx_slice_size < num_true) continue;
for (int j = 0; j < num_true; ++j) {
const auto true_idx = i * idx_slice_size + j;
if (p_index[true_idx] == p_index[idx]) {
p_value[idx] -= 1e20;
break;
}
}
}
}
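// Worked example (hypothetical values): with num_true = 1 and a row whose
// true label is class 7, if class 7 is also drawn as a negative sample, its
// sampled logit receives -1e20 and so contributes ~0 after softmax. This
// assumes the first num_true columns of Samples hold the true labels, which
// is what the comparison against columns j < num_true above implies.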
template <typename T>
class SampleLogitsCUDAKernel : public framework::OpKernel<T> {
public:
using Tensor = framework::Tensor;
void Compute(const framework::ExecutionContext& context) const override {
// get necessary inputs
const Tensor* logits = context.Input<Tensor>("Logits");
const Tensor* labels = context.Input<Tensor>("Labels");
VLOG(3) << "Enter SampleLogitsCUDAKernel";
// get necessary outputs
Tensor* samples = context.Output<Tensor>("Samples");
Tensor* probabilities = context.Output<Tensor>("Probabilities");
Tensor* sampled_logits = context.Output<Tensor>("SampledLogits");
Tensor* sampled_labels = context.Output<Tensor>("SampledLabels");
// shapes
const auto batch_size = logits->dims()[0];
const auto num_classes = logits->dims()[1];
const auto labels_dim = labels->dims();
const auto num_true = labels_dim[1];
const auto samples_dim = samples->dims();
// attrs
const auto num_samples = context.Attr<int>("num_samples");
const bool use_customized_samples =
context.Attr<bool>("use_customized_samples");
const bool uniq = context.Attr<bool>("uniq");
const bool remove_accidental_hits =
context.Attr<bool>("remove_accidental_hits");
// device contexts
auto& dev_ctx = context.cuda_device_context();
// UNDERSTAND: allocate memories for temporaries
sampled_logits->mutable_data<T>(samples_dim, context.GetPlace());
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, sampled_logits, static_cast<T>(0));
auto sampled_labels_data =
sampled_labels->mutable_data<int64_t>(labels_dim, context.GetPlace());
int threads = 512;
size_t size = batch_size * num_true;
int grid = (size + threads - 1) / threads;
GPUSetLabel<T>
<<<grid, threads, 0, context.cuda_device_context().stream()>>>(
size, num_true, sampled_labels_data);
if (use_customized_samples) {
const Tensor* customized_samples =
context.Input<Tensor>("CustomizedSamples");
const Tensor* customized_probabilities =
context.Input<Tensor>("CustomizedProbabilities");
PADDLE_ENFORCE_EQ(customized_samples, samples,
platform::errors::InvalidArgument(
"CustomizedSamples must be the same Tensor with "
"Samples when use_customized_samples = True"));
PADDLE_ENFORCE_EQ(
customized_probabilities, probabilities,
platform::errors::InvalidArgument(
"CustomizedProbabilities must be the same Tensor with "
"Probabilities when use_customized_samples = True"));
} else {
samples->mutable_data<int64_t>(context.GetPlace());
probabilities->mutable_data<T>(samples_dim, context.GetPlace());
// UNDERSTAND: sampling
const auto seed = context.Attr<int>("seed");
auto sampler_with_prob = math::GPUSampleWithProb<T>();
sampler_with_prob(context.cuda_device_context(), seed, num_classes, uniq,
num_samples, labels, samples, probabilities);
}
// UNDERSTAND: gather sampled logits and remove accidental hits if needed
const auto num_take = samples->dims()[1];
const auto array_dims = logits->dims();
const auto idx_dims = samples->dims();
const T* p_array = logits->data<T>();
const int64_t* p_index = samples->data<int64_t>();
T* p_value = sampled_logits->data<T>();
// src slice size
const auto array_slice_size = array_dims[1];
// index slice size
const auto idx_slice_size = idx_dims[1];
size = batch_size * num_take;
grid = (size + threads - 1) / threads;
GPUTakeAlongD1<T>
<<<grid, threads, 0, context.cuda_device_context().stream()>>>(
size, batch_size, array_slice_size, idx_slice_size, p_array,
p_index, p_value);
if (remove_accidental_hits) {
const size_t size = batch_size * (num_true + num_samples);
int grid = (size + threads - 1) / threads;
gpu_compute_remove_accidental_hits<T>
<<<grid, threads, 0, context.cuda_device_context().stream()>>>(
size, num_true, idx_slice_size, p_index, p_value);
}
// subtract logQ(y|x) from the sampled logits
auto probs = EigenMatrix<T>::From(*probabilities);
auto smp_logits = EigenMatrix<T>::From(*sampled_logits);
smp_logits.device(*dev_ctx.eigen_device()) =
(smp_logits - probs.log().unaryExpr(TolerableValue<T>()))
.unaryExpr(TolerableValue<T>());
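// The statement above is the standard sampled-softmax correction,
//   sampled_logit -= log Q(y|x),
// where Q(y|x) is the probability with which the class was drawn, so that a
// softmax over the sampled logits approximates the full softmax;
// TolerableValue appears to guard against inf/nan intermediates.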
}
};
template <typename T>
class SampleLogitsGradCUDAKernel : public framework::OpKernel<T> {
public:
using Tensor = framework::Tensor;
void Compute(const framework::ExecutionContext& context) const override {
auto logits_grad = context.Output<Tensor>(framework::GradVarName("Logits"));
const Tensor* samples = context.Input<Tensor>("Samples");
const Tensor* sampled_logits_grad =
context.Input<Tensor>(framework::GradVarName("SampledLogits"));
logits_grad->mutable_data<T>(context.GetPlace());
auto& dev_ctx = context.cuda_device_context();
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, logits_grad, static_cast<T>(0));
// UNDERSTAND: scatter it back to logit_grad
const auto batch_size = samples->dims()[0];
const auto num_put = samples->dims()[1];
const auto array_dims = logits_grad->dims();
const auto idx_dims = samples->dims();
T* p_array = logits_grad->data<T>();
const int64_t* p_index = samples->data<int64_t>();
const T* p_value = sampled_logits_grad->data<T>();
// src slice size
const auto array_slice_size = array_dims[1];
// index slice size
const auto idx_slice_size = idx_dims[1];
int threads = 128;
const size_t size = batch_size;
int grid = (size + threads - 1) / threads;
GPUPutAlongD1<T>
<<<grid, threads, 0, context.cuda_device_context().stream()>>>(
size, batch_size, array_slice_size, idx_slice_size, p_array,
p_index, p_value);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sample_logits, ops::SampleLogitsCUDAKernel<float>,
ops::SampleLogitsCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(sample_logits_grad,
ops::SampleLogitsGradCUDAKernel<float>,
ops::SampleLogitsGradCUDAKernel<double>);
|
405b89501eb9c1b41abc0a4fc245bc580d121ce4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void scatter_kernel( int *x_coors, int *y_coors, float *pfe_output, float *scattered_feature, const int MAX_NUM_PILLARS_, const int GRID_X_SIZE, const int GRID_Y_SIZE)
{
int i_pillar = blockIdx.x;
int i_feature = threadIdx.x;
int x_ind = x_coors[i_pillar];
int y_ind = y_coors[i_pillar];
float feature = pfe_output[i_feature*MAX_NUM_PILLARS_ + i_pillar];
scattered_feature[i_feature*GRID_Y_SIZE*GRID_X_SIZE + y_ind * GRID_X_SIZE + x_ind] = feature;
} | 405b89501eb9c1b41abc0a4fc245bc580d121ce4.cu | #include "includes.h"
__global__ void scatter_kernel( int *x_coors, int *y_coors, float *pfe_output, float *scattered_feature, const int MAX_NUM_PILLARS_, const int GRID_X_SIZE, const int GRID_Y_SIZE)
{
int i_pillar = blockIdx.x;
int i_feature = threadIdx.x;
int x_ind = x_coors[i_pillar];
int y_ind = y_coors[i_pillar];
float feature = pfe_output[i_feature*MAX_NUM_PILLARS_ + i_pillar];
scattered_feature[i_feature*GRID_Y_SIZE*GRID_X_SIZE + y_ind * GRID_X_SIZE + x_ind] = feature;
} |
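// Note: each block handles one pillar (blockIdx.x) and each thread one
// feature channel (threadIdx.x), so the kernel is presumably launched as
// scatter_kernel<<<num_pillars, num_features>>>(...). pfe_output is read as a
// [num_features, MAX_NUM_PILLARS_] buffer and the pillar's feature vector is
// written into cell (y_ind, x_ind) of a dense
// [num_features, GRID_Y_SIZE, GRID_X_SIZE] canvas (a PointPillars-style
// scatter).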
2ea1092ccc91f4d69b8a27d206a95f848a71067a.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels_hip.cuh"
#include "../../utils/gpu/debug.cuh"
#include "../../utils/gpu/common.cuh"
#include "../../utils/gpu/cuda_parameters.hpp"
#include "thrust/device_ptr.h"
#include "thrust/execution_policy.h"
#include "thrust/scan.h"
namespace SD {
/**shared_memory(relation_t * , relation_t * , args_t *, cudaParameters_t )
* Function to join small relations, i.e. both R and S are smaller than GPU
* memory. The function uses a histogram stored in the GPU shared memory.
* hRelR : host side array for relation R.
* hRelS : host side array for relation S.
* args : arguments data structure needed for hash join.
* cudaParam : data structure storing the cuda parameters
*/
int shared_memory(relation_t *hRelR, relation_t *hRelS, args_t *args, cudaParameters_t *cudaParam) {
//setting up the logger variable
relation_t *relR = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation R
relation_t *relS = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation S
relation_t *relRn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation R
relation_t *relSn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation S
relR->numTuples = hRelR->numTuples;
relS->numTuples = hRelS->numTuples;
data *out; //GPU side output buffer
int *globalPtr; //The global pointer that is used to get the index of the output tuples.
//allocating memory for output buffer
hipMalloc((void **) &out, 2 * relS->numTuples * sizeof(data));
//allocating memory for the global pointer
hipMalloc((void **) &globalPtr, sizeof(data));
//allocating device memory for storing input data
hipMalloc((void **) &relR->id, relR->numTuples * sizeof(data));
hipMalloc((void **) &relR->key, relR->numTuples * sizeof(data));
hipMalloc((void **) &relS->id, relS->numTuples * sizeof(data));
hipMalloc((void **) &relS->key, relS->numTuples * sizeof(data));
//allocating device memory for storing partitioned data
relRn->numTuples = relR->numTuples;
relSn->numTuples = relS->numTuples;
hipMalloc((void **) &relRn->id, relRn->numTuples * sizeof(data));
hipMalloc((void **) &relRn->key, relRn->numTuples * sizeof(data));
hipMalloc((void **) &relSn->id, relSn->numTuples * sizeof(data));
hipMalloc((void **) &relSn->key, relSn->numTuples * sizeof(data));
//declaring device side histogram data
int *rHisto, *sHisto;
int *rnHisto, *snHisto; //To allow for histogram update during re-order.
//allocating device side memory for histogram. An additional entry is required for the last partition.
hipMalloc((void **) &rHisto, (args->pCount + 1) * sizeof(int));
hipMalloc((void **) &sHisto, (args->pCount + 1) * sizeof(int));
hipMalloc((void **) &rnHisto, (args->pCount + 1) * sizeof(int));
hipMalloc((void **) &snHisto, (args->pCount + 1) * sizeof(int));
//setting the global pointer to 0
hipMemset(globalPtr, 0, sizeof(int));
//initializing all histogram entries to 0
hipMemset(rHisto, 0, (args->pCount + 1) * sizeof(int));
hipMemset(sHisto, 0, (args->pCount + 1) * sizeof(int));
hipMemset(rnHisto, 0, (args->pCount + 1) * sizeof(int));
hipMemset(snHisto, 0, (args->pCount + 1) * sizeof(int));
//setting kernel thread dimensions
cudaParam->gridSize =
args->pCountL2; //to avoid the histogram buffer overflow. the size of Histo is pCountL1 * pCountL2
cudaParam->blockSize = MAX_BLOCK_SIZE;
//making sure all cuda instructions before this point are completed before starting the time measurement
hipDeviceSynchronize();
//starting time measurement
hipEventRecord(cudaParam->start, cudaParam->streams[0]);
//copying Key of relation R to GPU for building the histogram
hipMemcpyAsync(relR->key, hRelR->key, relR->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[0]);
//building histogram for relation R
histogram_build_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[0] >>
> (relR->key, relR->numTuples, args->pCountL1, rHisto);
//getting the prefix sum of the level 1 histogram for relation R
thrust::exclusive_scan(thrust::hip::par.on(cudaParam->streams[0]),
thrust::device_pointer_cast(rHisto),
thrust::device_pointer_cast(rHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(rHisto));
//copying id of relation R to GPU
hipMemcpyAsync(relR->id, hRelR->id, relR->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[1]);
//making sure that all histogram builds and data copies are complete.
hipDeviceSynchronize();
//re-ordering relation R. This is the first level of partitioning
reorder_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[0] >>
> (relR->key, relR->id, relR->numTuples, args->pCountL1, rHisto, relRn->key, relRn->id);
//copying Key of relation S to GPU for building the histogram
hipMemcpyAsync(relS->key, hRelS->key, relS->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[2]);
//building histogram for second level of relation R partitioning
histogram_build_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[0] >>
> (relRn->key, args->pCountL2, args->pCountL1, rnHisto, rHisto);
//building histogram for relation S
histogram_build_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[2] >>
> (relS->key, relS->numTuples, args->pCountL1, sHisto);
//getting the prefix sum of the level 2 histogram for relation R
thrust::exclusive_scan(thrust::hip::par.on(cudaParam->streams[0]),
thrust::device_pointer_cast(rnHisto),
thrust::device_pointer_cast(rnHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(rnHisto));
//getting the prefix sum of the level 1 histogram for relation S
thrust::exclusive_scan(thrust::hip::par.on(cudaParam->streams[2]),
thrust::device_pointer_cast(sHisto),
thrust::device_pointer_cast(sHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(sHisto));
//copying id of relation S to GPU
hipMemcpyAsync(relS->id, hRelS->id, relS->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[3]);
//re-ordering relation R. This is the second level of partitioning
reorder_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[0] >>
> (relRn->key, relRn->id, args->pCountL2, args->pCountL1, rnHisto, rHisto, relR->key, relR->id);
//making sure the data transfer of id values of relation S is complete before re-ordering the relation.
hipStreamSynchronize(cudaParam->streams[3]);
//re-ordering relation S. This is the first level of partitioning
reorder_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[2] >>
> (relS->key, relS->id, relS->numTuples, args->pCountL1, sHisto, relSn->key, relSn->id);
//building histogram for second level of relation S partitioning
histogram_build_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[2] >>
> (relSn->key, args->pCountL2, args->pCountL1, snHisto, sHisto);
//getting the prefix sum of the level 2 histogram for relation S
thrust::exclusive_scan(thrust::hip::par.on(cudaParam->streams[2]),
thrust::device_pointer_cast(snHisto),
thrust::device_pointer_cast(snHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(snHisto));
//re-ordering relation S. This is the second level of partitioning
reorder_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[2] >>
> (relSn->key, relSn->id, args->pCountL2, args->pCountL1, snHisto, sHisto, relS->key, relS->id);
//ending time measurement
hipEventRecord(cudaParam->stop, cudaParam->streams[2]);
//making sure all CUDA processes are completed before ending the time measurement
hipDeviceSynchronize();
//measuring time
hipEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//displaying execution time
std::cout << "Partition Stage Execution Time for Shared Memory: " << cudaParam->time << " ms" << std::endl;
//starting time measurement
hipEventRecord(cudaParam->start, cudaParam->streams[0]);
//probe kernel invocation. We assume that the data distribution is uniform.
probe << < ::min(args->pCount, MAX_GRID_SIZE), ceil((float) relS->numTuples / args->pCount), 2
* ceil((float) relS->numTuples / args->pCount) * sizeof(int), cudaParam->streams[0] >>
> (relR->key, relR->id, relS->key, relS->id, rnHisto, snHisto, args->pCount, globalPtr, out);
//copying the results back to the CPU main memory. Assuming a 100% match rate.
hipMemcpyAsync(args->hOut[0], out, 2 * relS->numTuples * sizeof(int), hipMemcpyDeviceToHost, cudaParam->streams[0]);
//ending time measurement
hipEventRecord(cudaParam->stop, cudaParam->streams[0]);
//making sure all CUDA processes are completed before ending the time measurement
hipDeviceSynchronize();
//measuring time
hipEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//checking for any errors during execution
check_cuda_error((char *) __FILE__, __LINE__);
//displaying execution time
std::cout << "Join Stage Execution Time for Shared Memory: " << cudaParam->time << " ms" << std::endl;
//debug code
//displayGPUBuffer(rnHisto, args->hOut[0], args->pCount + 1);
//cleaning up all allocated data
hipFree(relR->id);
hipFree(relR->key);
hipFree(relS->id);
hipFree(relS->key);
hipFree(relRn->id);
hipFree(relRn->key);
hipFree(relSn->id);
hipFree(relSn->key);
hipFree(rHisto);
hipFree(sHisto);
hipFree(rnHisto);
hipFree(snHisto);
hipFree(out);
hipFree(globalPtr);
return 0;
}
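// A minimal host-side sketch of the partition pattern used in shared_memory()
// above (illustrative only: the helper and its modulo partition function are
// assumptions, the real kernels live in kernels_hip.cuh). The flow is
// histogram -> exclusive prefix sum -> re-order, with the scanned histogram
// acting as write cursors; e.g. counts {3, 1, 2, 2} scan to offsets
// {0, 3, 4, 6}.
static void partition_reference(const int *key, int n, int pCount,
                                int *histo /* size pCount + 1 */, int *outKey) {
    for (int p = 0; p <= pCount; p++) histo[p] = 0;
    for (int i = 0; i < n; i++) histo[key[i] % pCount]++; // per-partition counts
    int sum = 0;
    for (int p = 0; p <= pCount; p++) { // exclusive prefix sum
        int c = histo[p];
        histo[p] = sum;
        sum += c;
    }
    for (int i = 0; i < n; i++) // re-order: each write advances its cursor
        outKey[histo[key[i] % pCount]++] = key[i];
}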
/**global_memory(relation_t * , relation_t * , args_t *, cudaParameters_t )
* Function to join small relations using GPU global memory.
* hRelR : host side array for relation R.
* hRelS : host side array for relation S.
* args : arguments data structure needed for hash join.
* cudaParam : data structure storing the cuda parameters
*/
int global_memory(relation_t *hRelR, relation_t *hRelS, args_t *args, cudaParameters_t *cudaParam) {
//setting up the logger variable
relation_t *relR = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation R
relation_t *relS = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation S
relation_t *relRn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation R
relation_t *relSn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation S
relR->numTuples = hRelR->numTuples;
relS->numTuples = hRelS->numTuples;
data *out; //GPU side output buffer
int *globalPtr; //The global pointer that is used to get the index of the output tuples.
//allocating memory for output buffer
hipMalloc((void **) &out, 2 * relS->numTuples * sizeof(data));
//allocating memory for the global pointer
hipMalloc((void **) &globalPtr, sizeof(data));
//allocating device memory for storing input data
hipMalloc((void **) &relR->id, relR->numTuples * sizeof(data));
hipMalloc((void **) &relR->key, relR->numTuples * sizeof(data));
hipMalloc((void **) &relS->id, relS->numTuples * sizeof(data));
hipMalloc((void **) &relS->key, relS->numTuples * sizeof(data));
//allocating device memory for storing partitioned data
relRn->numTuples = relR->numTuples;
relSn->numTuples = relS->numTuples;
hipMalloc((void **) &relRn->id, relRn->numTuples * sizeof(data));
hipMalloc((void **) &relRn->key, relRn->numTuples * sizeof(data));
hipMalloc((void **) &relSn->id, relSn->numTuples * sizeof(data));
hipMalloc((void **) &relSn->key, relSn->numTuples * sizeof(data));
//declaring device side histogram data
int *rHisto, *sHisto;
int *rnHisto, *snHisto; //To allow for histogram update during re-order.
//allocating device side memory for histogram. An additional entry is required for the last partition.
hipMalloc((void **) &rHisto, (args->pCount + 1) * sizeof(int));
hipMalloc((void **) &sHisto, (args->pCount + 1) * sizeof(int));
hipMalloc((void **) &rnHisto, (args->pCount + 1) * sizeof(int));
hipMalloc((void **) &snHisto, (args->pCount + 1) * sizeof(int));
//setting the global pointer to 0
hipMemset(globalPtr, 0, sizeof(int));
//initializing all histogram entries to 0
hipMemset(rHisto, 0, (args->pCount + 1) * sizeof(int));
hipMemset(sHisto, 0, (args->pCount + 1) * sizeof(int));
hipMemset(rnHisto, 0, (args->pCount + 1) * sizeof(int));
hipMemset(snHisto, 0, (args->pCount + 1) * sizeof(int));
//setting kernel thread dimensions
cudaParam->gridSize =
args->pCountL2; //to avoid the histogram buffer overflow. the size of Histo is pCountL1 * pCountL2
cudaParam->blockSize = MAX_BLOCK_SIZE;
//making sure all cuda instructions before this point are completed before starting the time measurement
hipDeviceSynchronize();
//starting time measurement
hipEventRecord(cudaParam->start, cudaParam->streams[0]);
//copying Key of relation R to GPU for building the histogram
hipMemcpyAsync(relR->key, hRelR->key, relR->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[0]);
//building histogram for relation R
histogram_build_global << < ::min((int) (relR->numTuples / cudaParam->blockSize), MAX_GRID_SIZE),
cudaParam->blockSize, 0, cudaParam->streams[0] >> > (relR->key, relR->numTuples, args->pCount, rHisto);
//getting the prefix sum of the level 1 histogram for relation R
thrust::exclusive_scan(thrust::hip::par.on(cudaParam->streams[0]),
thrust::device_pointer_cast(rHisto),
thrust::device_pointer_cast(rHisto) + (args->pCount + 1),
thrust::device_pointer_cast(rHisto));
//copying id of relation R to GPU
hipMemcpyAsync(relR->id, hRelR->id, relR->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[1]);
//creating a device side copy of relation R histogram for the re-order kernel. Otherwise the re-order kernel would update the histogram, making it unusable for the probe kernel.
hipMemcpyAsync(rnHisto, rHisto, (args->pCount + 1) * sizeof(int), hipMemcpyDeviceToDevice, cudaParam->streams[0]);
//making sure that all histogram builds and data copies are complete.
hipDeviceSynchronize();
//re-ordering relation R. This is the first level of partitioning
reorder_global << < ::min((int) (relR->numTuples / cudaParam->blockSize), MAX_GRID_SIZE), cudaParam->blockSize, 0,
cudaParam->streams[0] >> > (relR->key, relR->id, relR->numTuples, args->pCount, rHisto, relRn->key, relRn->id);
//copying Key of relation S to GPU for building the histogram
hipMemcpyAsync(relS->key, hRelS->key, relS->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[2]);
//building histogram for relation S
histogram_build_global << < ::min((int) (relS->numTuples / cudaParam->blockSize), MAX_GRID_SIZE),
cudaParam->blockSize, 0, cudaParam->streams[2] >> > (relS->key, relS->numTuples, args->pCount, sHisto);
//getting the prefix sum of the level 1 histogram for relation S
thrust::exclusive_scan(thrust::hip::par.on(cudaParam->streams[2]),
thrust::device_pointer_cast(sHisto),
thrust::device_pointer_cast(sHisto) + (args->pCount + 1),
thrust::device_pointer_cast(sHisto));
//copying id of relation S to GPU
hipMemcpyAsync(relS->id, hRelS->id, relS->numTuples * sizeof(int), hipMemcpyHostToDevice, cudaParam->streams[3]);
//creating a device side copy of relation S histogram for the re-order kernel. Otherwise the re-order kernel would update the histogram, making it unusable for the probe kernel.
hipMemcpyAsync(snHisto, sHisto, (args->pCount + 1) * sizeof(int), hipMemcpyDeviceToDevice, cudaParam->streams[2]);
//making sure the data transfer of id values of relation S is complete before re-ordering the relation.
hipStreamSynchronize(cudaParam->streams[3]);
//re-ordering relation S. This is the first level of partitioning
reorder_global << < ::min((int) (relS->numTuples / cudaParam->blockSize), MAX_GRID_SIZE), cudaParam->blockSize, 0,
cudaParam->streams[2] >> > (relS->key, relS->id, relS->numTuples, args->pCount, sHisto, relSn->key, relSn->id);
//ending time measurement
hipEventRecord(cudaParam->stop, cudaParam->streams[2]);
//making sure all CUDA processes are completed before ending the time measurement
hipDeviceSynchronize();
//measuring time
hipEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//displaying execution time
std::cout << "Partition Stage Execution Time for Global Memory: " << cudaParam->time << " ms" << std::endl;
//starting time measurement
hipEventRecord(cudaParam->start, cudaParam->streams[0]);
//probe kernel invocation. We assume that the data distribution is uniform.
probe << < ::min(args->pCount, MAX_GRID_SIZE), ceil((float) relS->numTuples / args->pCount), 2
* ceil((float) relS->numTuples / args->pCount) * sizeof(int), cudaParam->streams[0] >>
> (relRn->key, relRn->id, relSn->key, relSn->id, rnHisto, snHisto, args->pCount, globalPtr, out);
//copying the results back to the CPU main memory. Assuming a 100% match rate.
hipMemcpyAsync(args->hOut[0], out, 2 * relS->numTuples * sizeof(int), hipMemcpyDeviceToHost, cudaParam->streams[0]);
//ending time measurement
hipEventRecord(cudaParam->stop, cudaParam->streams[0]);
//making sure all CUDA processes are completed before ending the time measurement
hipDeviceSynchronize();
//measuring time
hipEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//checking for any errors during execution
check_cuda_error((char *) __FILE__, __LINE__);
//displaying execution time
std::cout << "Join Stage Execution Time for Global Memory: " << cudaParam->time << " ms" << std::endl;
//debug code
//displayGPUBuffer(out, args->hOut[0], 2 * relRn->numTuples);
//cleaning up all allocated data
hipFree(relR->id);
hipFree(relR->key);
hipFree(relS->id);
hipFree(relS->key);
hipFree(relRn->id);
hipFree(relRn->key);
hipFree(relSn->id);
hipFree(relSn->key);
hipFree(rHisto);
hipFree(sHisto);
hipFree(rnHisto);
hipFree(snHisto);
hipFree(out);
hipFree(globalPtr);
return 0;
}
/**small_data_sm(relation_t * , relation_t * , args_t * )
* Function to join small relations with high match rate. The histogram is
* stored in the shared memory in this implementation.
* hRelR : host side array for relation R.
* hRelS : host side array for relation S.
* args : arguments data structure needed for hash join.
* cudaParam : data structure storing the cuda parameters
*/
} | 2ea1092ccc91f4d69b8a27d206a95f848a71067a.cu | #include "kernels.cuh"
#include "../../utils/gpu/debug.cuh"
#include "../../utils/gpu/common.cuh"
#include "../../utils/gpu/cuda_parameters.hpp"
#include "thrust/device_ptr.h"
#include "thrust/execution_policy.h"
#include "thrust/scan.h"
namespace SD {
/**shared_memory(relation_t * , relation_t * , args_t *, cudaParameters_t )
* Function to join small relations, i.e. both R and S are smaller than GPU
* memory. The function uses a histogram stored in the GPU shared memory.
* hRelR : host side array for relation R.
* hRelS : host side array for relation S.
* args : arguments data structure needed for hash join.
* cudaParam : data structure storing the cuda parameters
*/
int shared_memory(relation_t *hRelR, relation_t *hRelS, args_t *args, cudaParameters_t *cudaParam) {
//setting up the logger variable
relation_t *relR = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation R
relation_t *relS = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation S
relation_t *relRn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation R
relation_t *relSn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation S
relR->numTuples = hRelR->numTuples;
relS->numTuples = hRelS->numTuples;
data *out; //GPU side output buffer
int *globalPtr; //The global pointer that is used to get the index of the output tuples.
//allocating memory for output buffer
cudaMalloc((void **) &out, 2 * relS->numTuples * sizeof(data));
//allocating memory for the global pointer
cudaMalloc((void **) &globalPtr, sizeof(data));
//allocating device memory for storing input data
cudaMalloc((void **) &relR->id, relR->numTuples * sizeof(data));
cudaMalloc((void **) &relR->key, relR->numTuples * sizeof(data));
cudaMalloc((void **) &relS->id, relS->numTuples * sizeof(data));
cudaMalloc((void **) &relS->key, relS->numTuples * sizeof(data));
//allocating device memory for storing partitioned data
relRn->numTuples = relR->numTuples;
relSn->numTuples = relS->numTuples;
cudaMalloc((void **) &relRn->id, relRn->numTuples * sizeof(data));
cudaMalloc((void **) &relRn->key, relRn->numTuples * sizeof(data));
cudaMalloc((void **) &relSn->id, relSn->numTuples * sizeof(data));
cudaMalloc((void **) &relSn->key, relSn->numTuples * sizeof(data));
//declaring device side histogram data
int *rHisto, *sHisto;
int *rnHisto, *snHisto; //To allow for histogram update during re-order.
//allocating device side memory for histogram. An additional entry is required for the last partition.
cudaMalloc((void **) &rHisto, (args->pCount + 1) * sizeof(int));
cudaMalloc((void **) &sHisto, (args->pCount + 1) * sizeof(int));
cudaMalloc((void **) &rnHisto, (args->pCount + 1) * sizeof(int));
cudaMalloc((void **) &snHisto, (args->pCount + 1) * sizeof(int));
//setting the global pointer to 0
cudaMemset(globalPtr, 0, sizeof(int));
//initializing all histogram entries to 0
cudaMemset(rHisto, 0, (args->pCount + 1) * sizeof(int));
cudaMemset(sHisto, 0, (args->pCount + 1) * sizeof(int));
cudaMemset(rnHisto, 0, (args->pCount + 1) * sizeof(int));
cudaMemset(snHisto, 0, (args->pCount + 1) * sizeof(int));
//setting kernel thread dimensions
cudaParam->gridSize =
args->pCountL2; //to avoid the histogram buffer overflow. the size of Histo is pCountL1 * pCountL2
cudaParam->blockSize = MAX_BLOCK_SIZE;
//making sure all cuda instructions before this point are completed before starting the time measurement
cudaDeviceSynchronize();
//starting time measurement
cudaEventRecord(cudaParam->start, cudaParam->streams[0]);
//copying Key of relation R to GPU for building the histogram
cudaMemcpyAsync(relR->key, hRelR->key, relR->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[0]);
//building histogram for relation R
histogram_build_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[0] >>
> (relR->key, relR->numTuples, args->pCountL1, rHisto);
//getting the prefix sum of the level 1 histogram for relation R
thrust::exclusive_scan(thrust::cuda::par.on(cudaParam->streams[0]),
thrust::device_pointer_cast(rHisto),
thrust::device_pointer_cast(rHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(rHisto));
//copying id of relation R to GPU
cudaMemcpyAsync(relR->id, hRelR->id, relR->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[1]);
//making sure that all histogram builds and data copies are complete.
cudaDeviceSynchronize();
//re-ordering relation R. This is the first level of partitioning
reorder_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[0] >>
> (relR->key, relR->id, relR->numTuples, args->pCountL1, rHisto, relRn->key, relRn->id);
//copying Key of relation S to GPU for building the histogram
cudaMemcpyAsync(relS->key, hRelS->key, relS->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[2]);
//building histogram for second level of relation R partitioning
histogram_build_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[0] >>
> (relRn->key, args->pCountL2, args->pCountL1, rnHisto, rHisto);
//building histogram for relation S
histogram_build_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[2] >>
> (relS->key, relS->numTuples, args->pCountL1, sHisto);
//getting the prefix sum of the level 2 histogram for relation R
thrust::exclusive_scan(thrust::cuda::par.on(cudaParam->streams[0]),
thrust::device_pointer_cast(rnHisto),
thrust::device_pointer_cast(rnHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(rnHisto));
//getting the prefix sum of the level 1 histogram for relation S
thrust::exclusive_scan(thrust::cuda::par.on(cudaParam->streams[2]),
thrust::device_pointer_cast(sHisto),
thrust::device_pointer_cast(sHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(sHisto));
//copying id of relation S to GPU
cudaMemcpyAsync(relS->id, hRelS->id, relS->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[3]);
//re-ordering relation R. This is the second level of partitioning
reorder_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[0] >>
> (relRn->key, relRn->id, args->pCountL2, args->pCountL1, rnHisto, rHisto, relR->key, relR->id);
//making sure the data transfer of id values of relation S is complete before re-ordering the relation.
cudaStreamSynchronize(cudaParam->streams[3]);
//re-ordering relation S. This is the first level of partitioning
reorder_L1 << < args->pCountL2, cudaParam->blockSize, args->pCountL1 * sizeof(int), cudaParam->streams[2] >>
> (relS->key, relS->id, relS->numTuples, args->pCountL1, sHisto, relSn->key, relSn->id);
//building histogram for second level of relation S partitioning
histogram_build_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[2] >>
> (relSn->key, args->pCountL2, args->pCountL1, snHisto, sHisto);
//getting the prefix sum of the level 2 histogram for relation S
thrust::exclusive_scan(thrust::cuda::par.on(cudaParam->streams[2]),
thrust::device_pointer_cast(snHisto),
thrust::device_pointer_cast(snHisto) + (args->pCountL1 * cudaParam->gridSize + 1),
thrust::device_pointer_cast(snHisto));
//re-ordering relation S. This is the second level of partitioning
reorder_L2 << < args->pCountL1, cudaParam->blockSize, args->pCountL2 * sizeof(int), cudaParam->streams[2] >>
> (relSn->key, relSn->id, args->pCountL2, args->pCountL1, snHisto, sHisto, relS->key, relS->id);
//ending time measurement
cudaEventRecord(cudaParam->stop, cudaParam->streams[2]);
//making sure all CUDA processes are completed before ending the time measurement
cudaDeviceSynchronize();
//measuring time
cudaEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//displaying execution time
std::cout << "Partition Stage Execution Time for Shared Memory: " << cudaParam->time << " ms" << std::endl;
//starting time measurement
cudaEventRecord(cudaParam->start, cudaParam->streams[0]);
//probe kernel invocation. We assume that the data distribution is uniform.
probe << < std::min(args->pCount, MAX_GRID_SIZE), ceil((float) relS->numTuples / args->pCount), 2
* ceil((float) relS->numTuples / args->pCount) * sizeof(int), cudaParam->streams[0] >>
> (relR->key, relR->id, relS->key, relS->id, rnHisto, snHisto, args->pCount, globalPtr, out);
//copying the results back to the CPU main memory. Assuming a 100% match rate.
cudaMemcpyAsync(args->hOut[0], out, 2 * relS->numTuples * sizeof(int), cudaMemcpyDeviceToHost, cudaParam->streams[0]);
//ending time measurement
cudaEventRecord(cudaParam->stop, cudaParam->streams[0]);
//making sure all CUDA processes are completed before ending the time measurement
cudaDeviceSynchronize();
//measuring time
cudaEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//checking for any errors during execution
check_cuda_error((char *) __FILE__, __LINE__);
//displaying execution time
std::cout << "Join Stage Execution Time for Shared Memory: " << cudaParam->time << " ms" << std::endl;
//debug code
//displayGPUBuffer(rnHisto, args->hOut[0], args->pCount + 1);
//cleaning up all allocated data
cudaFree(relR->id);
cudaFree(relR->key);
cudaFree(relS->id);
cudaFree(relS->key);
cudaFree(relRn->id);
cudaFree(relRn->key);
cudaFree(relSn->id);
cudaFree(relSn->key);
cudaFree(rHisto);
cudaFree(sHisto);
cudaFree(rnHisto);
cudaFree(snHisto);
cudaFree(out);
cudaFree(globalPtr);
return 0;
}
/**global_memory(relation_t * , relation_t * , args_t *, cudaParameters_t )
* Function to join small relations using GPU global memory.
* hRelR : host side array for relation R.
* hRelS : host side array for relation S.
* args : arguments data structure needed for hash join.
* cudaParam : data structure storing the cuda parameters
*/
int global_memory(relation_t *hRelR, relation_t *hRelS, args_t *args, cudaParameters_t *cudaParam) {
//setting up the logger variable
relation_t *relR = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation R
relation_t *relS = (relation_t *) malloc(sizeof(relation_t)); //Device side array for relation S
relation_t *relRn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation R
relation_t *relSn = (relation_t *) malloc(sizeof(relation_t)); //Device side array for partitioned relation S
relR->numTuples = hRelR->numTuples;
relS->numTuples = hRelS->numTuples;
data *out; //GPU side output buffer
int *globalPtr; //The global pointer that is used to get the index of the output tuples.
//allocating memory for output buffer
cudaMalloc((void **) &out, 2 * relS->numTuples * sizeof(data));
//allocating memory for the global pointer
cudaMalloc((void **) &globalPtr, sizeof(data));
//allocating device memory for storing input data
cudaMalloc((void **) &relR->id, relR->numTuples * sizeof(data));
cudaMalloc((void **) &relR->key, relR->numTuples * sizeof(data));
cudaMalloc((void **) &relS->id, relS->numTuples * sizeof(data));
cudaMalloc((void **) &relS->key, relS->numTuples * sizeof(data));
//allocating device memory for storing partitioned data
relRn->numTuples = relR->numTuples;
relSn->numTuples = relS->numTuples;
cudaMalloc((void **) &relRn->id, relRn->numTuples * sizeof(data));
cudaMalloc((void **) &relRn->key, relRn->numTuples * sizeof(data));
cudaMalloc((void **) &relSn->id, relSn->numTuples * sizeof(data));
cudaMalloc((void **) &relSn->key, relSn->numTuples * sizeof(data));
//declaring device side histogram data
int *rHisto, *sHisto;
int *rnHisto, *snHisto; //To allow for histogram update during re-order.
//allocating device side memory for histogram. An additional entry is required for the last partition.
cudaMalloc((void **) &rHisto, (args->pCount + 1) * sizeof(int));
cudaMalloc((void **) &sHisto, (args->pCount + 1) * sizeof(int));
cudaMalloc((void **) &rnHisto, (args->pCount + 1) * sizeof(int));
cudaMalloc((void **) &snHisto, (args->pCount + 1) * sizeof(int));
//setting the global pointer to 0
cudaMemset(globalPtr, 0, sizeof(int));
//initializing all histogram entries to 0
cudaMemset(rHisto, 0, (args->pCount + 1) * sizeof(int));
cudaMemset(sHisto, 0, (args->pCount + 1) * sizeof(int));
cudaMemset(rnHisto, 0, (args->pCount + 1) * sizeof(int));
cudaMemset(snHisto, 0, (args->pCount + 1) * sizeof(int));
//setting kernel thread dimensions
cudaParam->gridSize =
args->pCountL2; //to avoid the histogram buffer overflow. the size of Histo is pCountL1 * pCountL2
cudaParam->blockSize = MAX_BLOCK_SIZE;
//making sure all cuda instructions before this point are completed before starting the time measurement
cudaDeviceSynchronize();
//starting time measurement
cudaEventRecord(cudaParam->start, cudaParam->streams[0]);
//copying Key of relation R to GPU for building the histogram
cudaMemcpyAsync(relR->key, hRelR->key, relR->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[0]);
//building histogram for relation R
histogram_build_global << < std::min((int) (relR->numTuples / cudaParam->blockSize), MAX_GRID_SIZE),
cudaParam->blockSize, 0, cudaParam->streams[0] >> > (relR->key, relR->numTuples, args->pCount, rHisto);
//getting the prefix sum of the level 1 histogram for relation R
thrust::exclusive_scan(thrust::cuda::par.on(cudaParam->streams[0]),
thrust::device_pointer_cast(rHisto),
thrust::device_pointer_cast(rHisto) + (args->pCount + 1),
thrust::device_pointer_cast(rHisto));
//copying id of relation R to GPU
cudaMemcpyAsync(relR->id, hRelR->id, relR->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[1]);
//creating a device side copy of relation R histogram for the re-order kernel. Otherwise the re-order kernel would update the histogram, making it unusable for the probe kernel.
cudaMemcpyAsync(rnHisto, rHisto, (args->pCount + 1) * sizeof(int), cudaMemcpyDeviceToDevice, cudaParam->streams[0]);
//making sure that all histogram builds and data copies are complete.
cudaDeviceSynchronize();
//re-ordering relation R. This is the first level of partitioning
reorder_global << < std::min((int) (relR->numTuples / cudaParam->blockSize), MAX_GRID_SIZE), cudaParam->blockSize, 0,
cudaParam->streams[0] >> > (relR->key, relR->id, relR->numTuples, args->pCount, rHisto, relRn->key, relRn->id);
//copying Key of relation S to GPU for building the histogram
cudaMemcpyAsync(relS->key, hRelS->key, relS->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[2]);
//building histogram for relation S
histogram_build_global << < std::min((int) (relS->numTuples / cudaParam->blockSize), MAX_GRID_SIZE),
cudaParam->blockSize, 0, cudaParam->streams[2] >> > (relS->key, relS->numTuples, args->pCount, sHisto);
//getting the prefix sum of the level 1 histogram for relation S
thrust::exclusive_scan(thrust::cuda::par.on(cudaParam->streams[2]),
thrust::device_pointer_cast(sHisto),
thrust::device_pointer_cast(sHisto) + (args->pCount + 1),
thrust::device_pointer_cast(sHisto));
//copying id of relation S to GPU
cudaMemcpyAsync(relS->id, hRelS->id, relS->numTuples * sizeof(int), cudaMemcpyHostToDevice, cudaParam->streams[3]);
//creating a device side copy of relation S histogram for the re-order kernel. Otherwise the re-order kernel would update the histogram, making it unusable for the probe kernel.
cudaMemcpyAsync(snHisto, sHisto, (args->pCount + 1) * sizeof(int), cudaMemcpyDeviceToDevice, cudaParam->streams[2]);
//making sure the data transfer of id values of relation S is complete before re-ordering the relation.
cudaStreamSynchronize(cudaParam->streams[3]);
//re-ordering relation S. This is the first level of partitioning
reorder_global << < std::min((int) (relS->numTuples / cudaParam->blockSize), MAX_GRID_SIZE), cudaParam->blockSize, 0,
cudaParam->streams[2] >> > (relS->key, relS->id, relS->numTuples, args->pCount, sHisto, relSn->key, relSn->id);
//ending time measurement
cudaEventRecord(cudaParam->stop, cudaParam->streams[2]);
//making sure all CUDA processes are completed before ending the time measurement
cudaDeviceSynchronize();
//measuring time
cudaEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//displaying execution time
std::cout << "Partition Stage Execution Time for Global Memory: " << cudaParam->time << " ms" << std::endl;
//starting time measurement
cudaEventRecord(cudaParam->start, cudaParam->streams[0]);
//probe kernel invocation. We assume that the data distribution is uniform.
probe << < std::min(args->pCount, MAX_GRID_SIZE), ceil((float) relS->numTuples / args->pCount), 2
* ceil((float) relS->numTuples / args->pCount) * sizeof(int), cudaParam->streams[0] >>
> (relRn->key, relRn->id, relSn->key, relSn->id, rnHisto, snHisto, args->pCount, globalPtr, out);
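// Note on the launch arithmetic above (example values are hypothetical): each
// block probes one partition (or several when pCount exceeds MAX_GRID_SIZE)
// with ceil(|S| / pCount) threads and 2 * ceil(|S| / pCount) * sizeof(int)
// bytes of shared memory, e.g. |S| = 2^20 and pCount = 4096 give 256 threads
// and 2 KiB per block; pCount therefore has to be large enough that
// ceil(|S| / pCount) stays within the 1024-threads-per-block limit.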
//copying the results back to the CPU main memory. Assuming a 100% match rate.
cudaMemcpyAsync(args->hOut[0], out, 2 * relS->numTuples * sizeof(int), cudaMemcpyDeviceToHost, cudaParam->streams[0]);
//ending time measurement
cudaEventRecord(cudaParam->stop, cudaParam->streams[0]);
//making sure all CUDA processes are completed before ending the time measurement
cudaDeviceSynchronize();
//measuring time
cudaEventElapsedTime(&cudaParam->time, cudaParam->start, cudaParam->stop);
//checking for any errors during execution
check_cuda_error((char *) __FILE__, __LINE__);
//displaying execution time
std::cout << "Join Stage Execution Time for Global Memory: " << cudaParam->time << " ms" << std::endl;
//debug code
//displayGPUBuffer(out, args->hOut[0], 2 * relRn->numTuples);
//cleaning up all allocated data
cudaFree(relR->id);
cudaFree(relR->key);
cudaFree(relS->id);
cudaFree(relS->key);
cudaFree(relRn->id);
cudaFree(relRn->key);
cudaFree(relSn->id);
cudaFree(relSn->key);
cudaFree(rHisto);
cudaFree(sHisto);
cudaFree(rnHisto);
cudaFree(snHisto);
cudaFree(out);
cudaFree(globalPtr);
return 0;
}
/**small_data_sm(relation_t * , relation_t * , args_t * )
* Function to join small relations with high match rate. The histogram is
* stored in the shared memory in this implementation.
* hRelR : host side array for relation R.
* hRelS : host side array for relation S.
* args : arguments data structure needed for hash join.
* cudaParam : data structure storing the cuda parameters
*/
} |
2017dda801b781e6fec3aa7dc0009ba3d1bed86c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file sum_q_pw_dm_pw.cu
*
* \brief CUDA kernel to perform a summation over xi,xi' indices for the charge density augmentation.
*/
#include "../SDDK/GPU/cuda_common.hpp"
#include "../SDDK/GPU/acc_runtime.hpp"
#ifdef __CUDA
#include "../SDDK/GPU/cuda_timer.hpp"
#endif
__global__ void sum_q_pw_dm_pw_gpu_kernel
(
int nbf__,
double const* q_pw__,
double const* dm_pw__,
double const* sym_weight__,
acc_complex_double_t* rho_pw__
)
{
ACC_DYNAMIC_SHARED( char, sdata_ptr)
double* rho_re = (double*)&sdata_ptr[0];
double* rho_im = (double*)&sdata_ptr[sizeof(double) * blockDim.x];
int igloc = blockIdx.x;
rho_re[threadIdx.x] = 0;
rho_im[threadIdx.x] = 0;
int ld = nbf__ * (nbf__ + 1) / 2;
int N = num_blocks(ld, blockDim.x);
for (int n = 0; n < N; n++) {
int i = n * blockDim.x + threadIdx.x;
if (i < ld) {
double qx = q_pw__[array2D_offset(i, 2 * igloc, ld)];
double qy = q_pw__[array2D_offset(i, 2 * igloc + 1, ld)];
double dx = dm_pw__[array2D_offset(i, 2 * igloc, ld)];
double dy = dm_pw__[array2D_offset(i, 2 * igloc + 1, ld)];
rho_re[threadIdx.x] += sym_weight__[i] * (dx * qx - dy * qy);
rho_im[threadIdx.x] += sym_weight__[i] * (dy * qx + dx * qy);
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if (threadIdx.x % (2 * s) == 0) {
rho_re[threadIdx.x] = rho_re[threadIdx.x] + rho_re[threadIdx.x + s];
rho_im[threadIdx.x] = rho_im[threadIdx.x] + rho_im[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
rho_pw__[igloc] = accCadd(rho_pw__[igloc], make_accDoubleComplex(rho_re[0], rho_im[0]));
}
}
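// Per G-vector the kernel above accumulates
//   rho_pw[igloc] += sum_i sym_weight[i] * dm_pw[i](igloc) * q_pw[i](igloc)
// over the packed (xi, xi') index i = 0 .. nbf*(nbf+1)/2 - 1, where the real
// and imaginary parts of q_pw and dm_pw are stored in columns 2*igloc and
// 2*igloc + 1 and the product is complex; the in-block tree reduction over
// threadIdx.x combines the per-thread partial sums.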
extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__,
int nbf__,
double const* q_pw__,
double const* dm_pw__,
double const* sym_weight__,
acc_complex_double_t* rho_pw__,
int stream_id__)
{
#ifdef __CUDA
CUDA_timer t("sum_q_pw_dm_pw_gpu");
#endif
acc_stream_t stream = (acc_stream_t)acc::stream(stream_id(stream_id__));
dim3 grid_t(64);
dim3 grid_b(num_gvec_loc__);
accLaunchKernel((sum_q_pw_dm_pw_gpu_kernel), dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), stream,
nbf__,
q_pw__,
dm_pw__,
sym_weight__,
rho_pw__
);
}
| 2017dda801b781e6fec3aa7dc0009ba3d1bed86c.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file sum_q_pw_dm_pw.cu
*
* \brief CUDA kernel to perform a summation over xi,xi' indices for the charge density augmentation.
*/
#include "../SDDK/GPU/cuda_common.hpp"
#include "../SDDK/GPU/acc_runtime.hpp"
#ifdef __CUDA
#include "../SDDK/GPU/cuda_timer.hpp"
#endif
__global__ void sum_q_pw_dm_pw_gpu_kernel
(
int nbf__,
double const* q_pw__,
double const* dm_pw__,
double const* sym_weight__,
acc_complex_double_t* rho_pw__
)
{
ACC_DYNAMIC_SHARED( char, sdata_ptr)
double* rho_re = (double*)&sdata_ptr[0];
double* rho_im = (double*)&sdata_ptr[sizeof(double) * blockDim.x];
int igloc = blockIdx.x;
rho_re[threadIdx.x] = 0;
rho_im[threadIdx.x] = 0;
int ld = nbf__ * (nbf__ + 1) / 2;
int N = num_blocks(ld, blockDim.x);
for (int n = 0; n < N; n++) {
int i = n * blockDim.x + threadIdx.x;
if (i < ld) {
double qx = q_pw__[array2D_offset(i, 2 * igloc, ld)];
double qy = q_pw__[array2D_offset(i, 2 * igloc + 1, ld)];
double dx = dm_pw__[array2D_offset(i, 2 * igloc, ld)];
double dy = dm_pw__[array2D_offset(i, 2 * igloc + 1, ld)];
rho_re[threadIdx.x] += sym_weight__[i] * (dx * qx - dy * qy);
rho_im[threadIdx.x] += sym_weight__[i] * (dy * qx + dx * qy);
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if (threadIdx.x % (2 * s) == 0) {
rho_re[threadIdx.x] = rho_re[threadIdx.x] + rho_re[threadIdx.x + s];
rho_im[threadIdx.x] = rho_im[threadIdx.x] + rho_im[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
rho_pw__[igloc] = accCadd(rho_pw__[igloc], make_accDoubleComplex(rho_re[0], rho_im[0]));
}
}
extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__,
int nbf__,
double const* q_pw__,
double const* dm_pw__,
double const* sym_weight__,
acc_complex_double_t* rho_pw__,
int stream_id__)
{
#ifdef __CUDA
CUDA_timer t("sum_q_pw_dm_pw_gpu");
#endif
acc_stream_t stream = (acc_stream_t)acc::stream(stream_id(stream_id__));
dim3 grid_t(64);
dim3 grid_b(num_gvec_loc__);
accLaunchKernel((sum_q_pw_dm_pw_gpu_kernel), dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), stream,
nbf__,
q_pw__,
dm_pw__,
sym_weight__,
rho_pw__
);
}
|
0b989649c7a5dcc87c5019abf8e848d0ca58c273.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/funcs/pooling.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
namespace phi {
namespace funcs {
struct FastDivModForPooling {
public:
paddle::platform::FastDivMod channel;
paddle::platform::FastDivMod width;
paddle::platform::FastDivMod height;
explicit HOSTDEVICE FastDivModForPooling(const int channels,
const int output_width,
const int output_height) {
channel = paddle::platform::FastDivMod(channels);
width = paddle::platform::FastDivMod(output_width);
height = paddle::platform::FastDivMod(output_height);
}
};
struct FastDivModForPoolingWithMoreStaff {
public:
paddle::platform::FastDivMod channel;
paddle::platform::FastDivMod width;
paddle::platform::FastDivMod height;
paddle::platform::FastDivMod ksize_w;
paddle::platform::FastDivMod ksize_h;
paddle::platform::FastDivMod stride_w;
paddle::platform::FastDivMod stride_h;
explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
const int channels,
const int input_width,
const int input_height,
const int ksize_width,
const int ksize_height,
const int stride_width,
const int stride_height) {
channel = paddle::platform::FastDivMod(channels);
width = paddle::platform::FastDivMod(input_width);
height = paddle::platform::FastDivMod(input_height);
ksize_w = paddle::platform::FastDivMod(ksize_width);
ksize_h = paddle::platform::FastDivMod(ksize_height);
stride_w = paddle::platform::FastDivMod(stride_width);
stride_h = paddle::platform::FastDivMod(stride_height);
}
};
template <typename FastDivModForPooling>
__device__ void OffsetPreparationFor4Dimension(int index,
bool channel_last,
FastDivModForPooling divmods,
const int pad_width,
const int pad_height,
const int aux_width,
const int aux_height,
int* w_offset,
int* h_offset,
int* c_offset,
int* stride) {
if (!channel_last) { /* NCHW */
auto input_width_divmod = divmods.width.Divmod(index);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
auto channel_divmod = divmods.channel.Divmod(input_height_divmod.val[0]);
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*c_offset = channel_divmod.val[1];
*stride = (channel_divmod.val[0] * divmods.channel.divisor + *c_offset) *
aux_height * aux_width;
} else { /* NHWC */
auto c_divmod = divmods.channel.Divmod(index);
auto input_width_divmod = divmods.width.Divmod(c_divmod.val[0]);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
*c_offset = c_divmod.val[1];
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*stride = input_height_divmod.val[0] * aux_height * aux_width *
divmods.channel.divisor;
}
}
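// Forward 2-D pooling kernel. Each thread (grid-stride loop) produces one
// output element: it locates its pooling window in the input, folds the window
// through pool_process.compute() and finalizes with the pool size (window area
// for exclusive/adaptive pooling, ksize_height * ksize_width otherwise).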
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads,
const T* input_data,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
FastDivModForPooling divmods,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
channel_last,
divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
pool_process.compute(input_data[input_idx], &ele);
}
}
int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
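// Backward 2-D pooling kernel. Each thread handles one input element and
// accumulates the gradient contributions of every output window covering it,
// scaled by 1 / pool_size. When pool_process.use_x is true the forward input
// and output values are also fed to pool_process.compute().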
template <typename T, typename PoolProcess>
__global__ void KernelPool2DGrad(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ output_data,
const T* __restrict__ output_grad,
const int output_width,
const int output_height,
const int input_width,
const int input_height,
const int ksize_width,
const int ksize_height,
const int stride_width,
const int stride_height,
const int padding_width,
const int padding_height,
FastDivModForPoolingWithMoreStaff divmods,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* __restrict__ input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
T input = static_cast<T>(0);
T input_grad_data = static_cast<T>(0);
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<>(index,
channel_last,
divmods,
padding_width,
padding_height,
output_width,
output_height,
&w_offset,
&h_offset,
&c_offset,
&output_offset);
if (pool_process.use_x) {
input = input_data[index];
output_data += output_offset;
}
output_grad += output_offset;
if (adaptive) {
auto tmp_phend = divmods.height.Divmod((h_offset + 1) * output_height);
auto tmp_pwend = divmods.width.Divmod((w_offset + 1) * output_width);
phstart = divmods.height.Div(h_offset * output_height);
pwstart = divmods.width.Div(w_offset * output_width);
phend = tmp_phend.val[1] > 0 ? tmp_phend.val[0] + 1 : tmp_phend.val[0];
pwend = tmp_pwend.val[1] > 0 ? tmp_pwend.val[0] + 1 : tmp_pwend.val[0];
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
auto ksize_w_divmod = divmods.ksize_w.Divmod(input_width);
auto ksize_h_divmod = divmods.ksize_h.Divmod(input_height);
auto tmp_width = ksize_w_divmod.val[1] > 0 ? ksize_w_divmod.val[0] + 1
: ksize_w_divmod.val[0];
auto tmp_height = ksize_h_divmod.val[1] > 0
? ksize_h_divmod.val[0] + 1
: ksize_h_divmod.val[0];
int pool_size = tmp_height * tmp_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0);
pool_process.compute(input,
                               output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
} else {
auto stride_height_div = divmods.stride_h.Div(h_offset - ksize_height);
auto stride_width_div = divmods.stride_w.Div(w_offset - ksize_width);
phstart = (h_offset < ksize_height) ? 0 : stride_height_div + 1;
pwstart = (w_offset < ksize_width) ? 0 : stride_width_div + 1;
phend = min(divmods.stride_h.Div(h_offset) + 1, output_height);
pwend = min(divmods.stride_w.Div(w_offset) + 1, output_width);
if (exclusive) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (hend - hstart) * (wend - wstart);
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0);
pool_process.compute(input,
                                 output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
} else {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int pool_size = ksize_height * ksize_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0);
pool_process.compute(input,
                                 output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
}
}
input_grad[index] = input_grad_data;
}
}
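// Backward kernel for 2-D max pooling. Each thread handles one output element,
// rescans its pooling window for the (first) input position whose value equals
// the forward output, and atomically adds the output gradient to that position.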
template <typename T>
__global__ void KernelMaxPool2DGrad(const int nthreads,
const T* input_data,
const T* output_data,
const T* output_grad,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
T* input_grad,
FastDivModForPooling divmods,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
channel_last,
divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset;
input_grad += input_offset;
int hstart = h_offset * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = w_offset * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
if (ele == input_data[input_data_idx]) {
maxIndex = input_data_idx;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
paddle::platform::CudaAtomicAdd(input_grad + maxIndex,
output_grad[index]);
}
}
}
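// Raw-pointer variant of the 2-D pooling forward pass (NCHW only): shapes are
// passed as std::vector<int> and KernelPool2D is launched on a caller-supplied
// stream instead of a phi::GPUContext.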
template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_height = input_shape[2];
const int input_width = input_shape[3];
const int output_channels = output_shape[1];
const int output_height = output_shape[2];
const int output_width = output_shape[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
// backends::gpu::ChangeThreadNum(context, &thread_num);
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream, nthreads,
input,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_compute,
exclusive,
adaptive,
output);
}
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_process,
exclusive,
adaptive,
output_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output->dims()[3] : output->dims()[1];
const int output_height =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_width =
channel_last ? output->dims()[2] : output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_process,
exclusive,
adaptive,
output_data,
channel_last);
}
};
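// Illustrative usage sketch (hypothetical tensor names, not part of this
// file): given a phi::GPUContext dev_ctx, an NCHW float DenseTensor x and a
// correctly sized output tensor out, a 2x2 average pooling could be run as
//
//   phi::funcs::Pool2dFunctor<phi::GPUContext, phi::funcs::AvgPool<float>, float>
//       pool2d;
//   pool2d(dev_ctx, x, {2, 2}, {2, 2}, {0, 0},
//          true /*exclusive*/, false /*adaptive*/, &out,
//          phi::funcs::AvgPool<float>());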
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(input_channels,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height);
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(context, nthreads);
hipLaunchKernelGGL(( KernelPool2DGrad<T, PoolProcess>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
context.stream(), nthreads,
input_data,
output_data,
output_grad_data,
output_width,
output_height,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height,
padding_width,
padding_height,
pool_divmods,
pool_process,
exclusive,
adaptive,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(input_channels,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height);
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(context, nthreads);
hipLaunchKernelGGL(( KernelPool2DGrad<T, PoolProcess>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
context.stream(), nthreads,
input_data,
output_data,
output_grad_data,
output_width,
output_height,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height,
padding_width,
padding_height,
pool_divmods,
pool_process,
exclusive,
adaptive,
input_grad_data,
channel_last);
}
};
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename T>
class MaxPool2dGradFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
DenseTensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
input_grad_data,
pool_divmods);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
input_grad_data,
pool_divmods,
channel_last);
}
};
template class Pool2dDirectCUDAFunctor<MaxPool<float>, float>;
template class Pool2dDirectCUDAFunctor<AvgPool<float>, float>;
template class MaxPool2dGradFunctor<phi::GPUContext, float>;
template class MaxPool2dGradFunctor<phi::GPUContext, double>;
template class MaxPool2dGradFunctor<phi::GPUContext, dtype::float16>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<float>, float>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<float>, float>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<float>, float>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<float>, float>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<double>, double>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<double>, double>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<double>, double>;
template class Pool2dFunctor<phi::GPUContext,
MaxPool<dtype::float16>,
dtype::float16>;
template class Pool2dFunctor<phi::GPUContext,
AvgPool<dtype::float16>,
dtype::float16>;
template class Pool2dGradFunctor<phi::GPUContext,
MaxPoolGrad<dtype::float16>,
dtype::float16>;
template class Pool2dGradFunctor<phi::GPUContext,
AvgPoolGrad<dtype::float16>,
dtype::float16>;
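// Forward 3-D pooling kernel: the 2-D logic extended with a depth dimension.
// Index decomposition here uses plain division/modulo (no FastDivMod) and both
// NCDHW and NDHWC layouts are supported via channel_last.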
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(const int nthreads,
const T* input_data,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) {
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else {
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
int input_data_stride;
if (!channel_last) { /* NCDHW */
input_data_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else { /* NDHWC */
input_data_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_data_stride;
T ele = pool_process.initial();
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
pool_process.compute(input_data[input_data_idx], &ele);
}
}
}
int pool_size = (exclusive || adaptive)
? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
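// Backward 3-D pooling kernel. Each thread handles one input element and sums
// the gradients of every output window covering it; for adaptive pooling the
// pool size is taken as ceil(input_dim / ksize_dim) per dimension.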
template <typename T, typename PoolProcess>
__global__ void KernelPool3DGrad(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ output_data,
const T* __restrict__ output_grad,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, d_offset, c_offset, batch_idx, output_stride;
T input = static_cast<T>(0);
if (!channel_last) { /* "NCDHW" */
w_offset = index % input_width + padding_width;
h_offset = (index / input_width) % input_height + padding_height;
d_offset =
(index / input_width / input_height) % input_depth + padding_depth;
c_offset = (index / input_width / input_height / input_depth) % channels;
batch_idx = index / input_width / input_height / input_depth / channels;
output_stride = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
} else { /* "NDHWC" */
c_offset = index % channels;
w_offset = (index / channels) % input_width + padding_width;
h_offset =
(index / channels / input_width) % input_height + padding_height;
d_offset = (index / channels / input_width / input_height) % input_depth +
padding_depth;
batch_idx = index / channels / input_width / input_height / input_depth;
output_stride =
batch_idx * output_depth * output_height * output_width * channels;
}
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = AdaptStartIndex(d_offset, output_depth, input_depth);
pdend = AdaptEndIndex(d_offset, output_depth, input_depth);
phstart = AdaptStartIndex(h_offset, output_height, input_height);
phend = AdaptEndIndex(h_offset, output_height, input_height);
pwstart = AdaptStartIndex(w_offset, output_width, input_width);
pwend = AdaptEndIndex(w_offset, output_width, input_width);
} else {
pdstart = (d_offset < ksize_depth)
? 0
: (d_offset - ksize_depth) / stride_depth + 1;
phstart = (h_offset < ksize_height)
? 0
: (h_offset - ksize_height) / stride_height + 1;
pwstart = (w_offset < ksize_width)
? 0
: (w_offset - ksize_width) / stride_width + 1;
pdend = min((d_offset) / stride_depth + 1, output_depth);
phend = min((h_offset) / stride_height + 1, output_height);
pwend = min((w_offset) / stride_width + 1, output_width);
}
if (pool_process.use_x) {
input = input_data[index];
output_data += output_stride;
}
output_grad += output_stride;
T input_grad_data = static_cast<T>(0.0);
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int pool_size;
if (adaptive) {
pool_size =
static_cast<int>(
ceil(static_cast<double>(input_depth) / ksize_depth)) *
static_cast<int>(
ceil(static_cast<double>(input_height) / ksize_height)) *
static_cast<int>(
ceil(static_cast<double>(input_width) / ksize_width));
} else {
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
pool_size =
exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
}
int output_sub_idx =
channel_last
? ((pd * output_height + ph) * output_width + pw) * channels +
c_offset
: (pd * output_height + ph) * output_width + pw;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0);
pool_process.compute(input,
                               output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
}
input_grad[index] = input_grad_data;
}
}
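// Backward kernel for 3-D max pooling: rescans the window for the position
// whose input value equals the forward output and atomically adds the output
// gradient there (mirrors KernelMaxPool2DGrad).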
template <typename T>
__global__ void KernelMaxPool3DGrad(const int nthreads,
const T* input_data,
const T* output_data,
const T* output_grad,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
T* input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) { /*NCDHW*/
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else { /*NDHWC*/
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = output_data[index];
bool stop = false;
int maxIdx = -1;
int input_stride;
if (!channel_last) {
input_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else {
input_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_stride;
input_grad += input_stride;
for (int d = dstart; d < dend && !stop; ++d) {
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
if (ele == input_data[input_data_idx]) {
stop = true;
maxIdx = input_data_idx;
}
}
}
}
if (maxIdx != -1) {
// atomic add
paddle::platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
}
}
}
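// Raw-pointer variant of the 3-D pooling forward pass (NCDHW only), launched
// on a caller-supplied stream; mirrors Pool2dDirectCUDAFunctor.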
template <typename PoolProcess, typename T>
void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_depth = input_shape[2];
const int input_height = input_shape[3];
const int input_width = input_shape[4];
const int output_channels = output_shape[1];
const int output_depth = output_shape[2];
const int output_height = output_shape[3];
const int output_width = output_shape[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream, nthreads,
input,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_compute,
exclusive,
adaptive,
output);
}
/*
* Tensors are in NCDHW or NDHWC format.
 * Ksize and strides are three elements. These three elements represent
 * depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
 * depth_back, height_up, height_down, width_left and width_right,
 * respectively.
*/
template <typename PoolProcess, class T>
class Pool3dFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
output_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output->dims()[4] : output->dims()[1];
const int output_depth =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_height =
channel_last ? output->dims()[2] : output->dims()[3];
const int output_width =
channel_last ? output->dims()[3] : output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
output_data,
channel_last);
}
};
/*
* Tensors are in NCDHW or NDHWC format.
 * Ksize and strides are three elements. These three elements represent
 * depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
 * depth_back, height_up, height_down, width_left and width_right,
 * respectively.
*/
template <typename PoolProcess, class T>
class Pool3dGradFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3DGrad<T, PoolProcess>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3DGrad<T, PoolProcess>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
input_grad_data,
channel_last); // add channel_last
}
};
/*
 * Tensors are in NCDHW or NDHWC format.
 * Ksize and strides are three elements. These three elements represent
 * depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
 * depth_back, height_up, height_down, width_left and width_right,
 * respectively.
*/
template <class T>
class MaxPool3dGradFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
DenseTensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
input_grad_data,
channel_last); // add channel_last
}
};
template class Pool3dDirectCUDAFunctor<MaxPool<float>, float>;
template class Pool3dDirectCUDAFunctor<AvgPool<float>, float>;
template class MaxPool3dGradFunctor<phi::GPUContext, float>;
template class MaxPool3dGradFunctor<phi::GPUContext, double>;
template class MaxPool3dGradFunctor<phi::GPUContext, dtype::float16>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<float>, float>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<float>, float>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<float>, float>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<float>, float>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<double>, double>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<double>, double>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<double>, double>;
template class Pool3dFunctor<phi::GPUContext,
MaxPool<dtype::float16>,
dtype::float16>;
template class Pool3dFunctor<phi::GPUContext,
AvgPool<dtype::float16>,
dtype::float16>;
template class Pool3dGradFunctor<phi::GPUContext,
MaxPoolGrad<dtype::float16>,
dtype::float16>;
template class Pool3dGradFunctor<phi::GPUContext,
AvgPoolGrad<dtype::float16>,
dtype::float16>;
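// Forward 2-D max pooling that also records, for every output element, the
// flat (h * input_width + w) index of the maximum inside its channel slice.
// The index goes to mask_data and is consumed by the corresponding grad kernel.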
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(const int nthreads,
const T1* input_data,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
bool adaptive,
T1* output_data,
T2* mask_data,
FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
false,
divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * input_width + w;
if (ele < input_data[input_index]) {
max_index = input_index;
ele = input_data[input_index];
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
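// Backward pass for max pooling with index: each thread handles one input
// element and accumulates the gradients of all output positions whose stored
// mask index points back to it.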
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(const int nthreads,
const T1* output_grad,
const T2* mask_data,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
bool adaptive,
T1* input_grad,
FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
false,
divmods,
0,
0,
output_width,
output_height,
&w_offset,
&h_offset,
&c_offset,
&output_offset);
mask_data += output_offset;
output_grad += output_offset;
if (adaptive) {
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
input_grad_data += output_grad[ph * output_width + pw];
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T1* input_data = input.data<T1>();
T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = context.template Alloc<T2>(mask);
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
adaptive,
output_data,
mask_data,
pool_divmods);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& output_grad,
const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_height = input_grad->dims()[2];
const int input_width = input_grad->dims()[3];
const int output_height = output_grad.dims()[2];
const int output_width = output_grad.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = context.template Alloc<T1>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, input_width, input_height);
hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
output_grad_data,
mask_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
adaptive,
input_grad_data,
pool_divmods);
}
};
template class MaxPool2dWithIndexFunctor<phi::GPUContext, float, int>;
template class MaxPool2dWithIndexGradFunctor<phi::GPUContext, float, int>;
template class MaxPool2dWithIndexFunctor<phi::GPUContext, double, int>;
template class MaxPool2dWithIndexGradFunctor<phi::GPUContext, double, int>;
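// 3-D counterpart of KernelMaxPool2dWithIdx: computes the max over a
// (depth, height, width) window and stores the flat in-channel index of the
// maximum in mask_data.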
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(const int nthreads,
const T1* input_data,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
bool adaptive,
T1* output_data,
T2* mask_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (ele < input_data[(d * input_height + h) * input_width + w]) {
max_index = (d * input_height + h) * input_width + w;
ele = input_data[max_index];
}
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
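// For every output element the kernel above records in mask_data the argmax
// offset (d * input_height + h) * input_width + w within the corresponding
// (D, H, W) input slice; the backward kernel below matches this offset
// against each input position to route the gradient.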
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(const int nthreads,
const T1* output_grad,
const T2* mask,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
bool adaptive,
T1* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int d_offset = (index / input_width / input_height) % input_depth;
int c_offset =
(index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = d_offset * output_depth / input_depth;
pdend =
min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
pdstart =
(d_offset + padding_depth < ksize_depth)
? 0
: (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth);
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
mask += output_idx;
output_grad += output_idx;
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[(pd * output_height + ph) * output_width + pw] ==
input_current_feature_map_idx)
input_grad_data +=
output_grad[(pd * output_height + ph) * output_width + pw];
}
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
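// Illustrative usage sketch (not part of the original source): running the
// forward functor defined below. `ctx`, `x`, `out` and `mask` are
// hypothetical caller-provided objects.
//
//   phi::funcs::MaxPool3dWithIndexFunctor<phi::GPUContext, float, int> pool;
//   pool(ctx, x, /*ksize=*/{2, 2, 2}, /*strides=*/{2, 2, 2},
//        /*paddings=*/{0, 0, 0}, /*adaptive=*/false, &out, &mask);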
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* input_data = input.data<T1>();
T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = context.template Alloc<T2>(mask);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
input_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
adaptive,
output_data,
mask_data);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
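// Note: the backward functor below always launches fixed 1024-thread blocks,
// whereas the forward functor may shrink thread_num via ChangeThreadNum when
// built with WITH_NV_JETSON.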
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& output_grad,
const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_depth = input_grad->dims()[2];
const int input_height = input_grad->dims()[3];
const int input_width = input_grad->dims()[4];
const int output_depth = output_grad.dims()[2];
const int output_height = output_grad.dims()[3];
const int output_width = output_grad.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* output_grad_data = output_grad.data<T1>();
const T2* mask_data = mask.data<T2>();
T1* input_grad_data = context.template Alloc<T1>(input_grad);
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads,
output_grad_data,
mask_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
adaptive,
input_grad_data);
}
};
template class MaxPool3dWithIndexFunctor<phi::GPUContext, float, int>;
template class MaxPool3dWithIndexGradFunctor<phi::GPUContext, float, int>;
template class MaxPool3dWithIndexFunctor<phi::GPUContext, double, int>;
template class MaxPool3dWithIndexGradFunctor<phi::GPUContext, double, int>;
} // namespace funcs
} // namespace phi
| 0b989649c7a5dcc87c5019abf8e848d0ca58c273.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/funcs/pooling.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
namespace phi {
namespace funcs {
struct FastDivModForPooling {
public:
paddle::platform::FastDivMod channel;
paddle::platform::FastDivMod width;
paddle::platform::FastDivMod height;
explicit HOSTDEVICE FastDivModForPooling(const int channels,
const int output_width,
const int output_height) {
channel = paddle::platform::FastDivMod(channels);
width = paddle::platform::FastDivMod(output_width);
height = paddle::platform::FastDivMod(output_height);
}
};
struct FastDivModForPoolingWithMoreStaff {
public:
paddle::platform::FastDivMod channel;
paddle::platform::FastDivMod width;
paddle::platform::FastDivMod height;
paddle::platform::FastDivMod ksize_w;
paddle::platform::FastDivMod ksize_h;
paddle::platform::FastDivMod stride_w;
paddle::platform::FastDivMod stride_h;
explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
const int channels,
const int input_width,
const int input_height,
const int ksize_width,
const int ksize_height,
const int stride_width,
const int stride_height) {
channel = paddle::platform::FastDivMod(channels);
width = paddle::platform::FastDivMod(input_width);
height = paddle::platform::FastDivMod(input_height);
ksize_w = paddle::platform::FastDivMod(ksize_width);
ksize_h = paddle::platform::FastDivMod(ksize_height);
stride_w = paddle::platform::FastDivMod(stride_width);
stride_h = paddle::platform::FastDivMod(stride_height);
}
};
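// FastDivMod precomputes its divisor so the kernels below avoid repeated
// integer division by a runtime value: divmod.Divmod(i) yields the quotient
// in val[0] and the remainder in val[1], while divmod.Div(i) yields only the
// quotient. OffsetPreparationFor4Dimension (next) uses these to decode a flat
// thread index into (w, h, c) offsets for either NCHW or NHWC layouts, plus
// an offset that callers add to their data pointers to reach the relevant
// batch/channel slab.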
template <typename FastDivModForPooling>
__device__ void OffsetPreparationFor4Dimension(int index,
bool channel_last,
FastDivModForPooling divmods,
const int pad_width,
const int pad_height,
const int aux_width,
const int aux_height,
int* w_offset,
int* h_offset,
int* c_offset,
int* stride) {
if (!channel_last) { /* NCHW */
auto input_width_divmod = divmods.width.Divmod(index);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
auto channel_divmod = divmods.channel.Divmod(input_height_divmod.val[0]);
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*c_offset = channel_divmod.val[1];
*stride = (channel_divmod.val[0] * divmods.channel.divisor + *c_offset) *
aux_height * aux_width;
} else { /* NHWC */
auto c_divmod = divmods.channel.Divmod(index);
auto input_width_divmod = divmods.width.Divmod(c_divmod.val[0]);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
*c_offset = c_divmod.val[1];
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*stride = input_height_divmod.val[0] * aux_height * aux_width *
divmods.channel.divisor;
}
}
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads,
const T* input_data,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
FastDivModForPooling divmods,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
channel_last,
divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
pool_process.compute(input_data[input_idx], &ele);
}
}
int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
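// The backward kernel below is parallel over input elements: each thread
// determines the range of output windows [phstart, phend) x [pwstart, pwend)
// covering its input position and accumulates the corresponding gradient
// contributions.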
template <typename T, typename PoolProcess>
__global__ void KernelPool2DGrad(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ output_data,
const T* __restrict__ output_grad,
const int output_width,
const int output_height,
const int input_width,
const int input_height,
const int ksize_width,
const int ksize_height,
const int stride_width,
const int stride_height,
const int padding_width,
const int padding_height,
FastDivModForPoolingWithMoreStaff divmods,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* __restrict__ input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
T input = static_cast<T>(0);
T input_grad_data = static_cast<T>(0);
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<>(index,
channel_last,
divmods,
padding_width,
padding_height,
output_width,
output_height,
&w_offset,
&h_offset,
&c_offset,
&output_offset);
if (pool_process.use_x) {
input = input_data[index];
output_data += output_offset;
}
output_grad += output_offset;
if (adaptive) {
auto tmp_phend = divmods.height.Divmod((h_offset + 1) * output_height);
auto tmp_pwend = divmods.width.Divmod((w_offset + 1) * output_width);
phstart = divmods.height.Div(h_offset * output_height);
pwstart = divmods.width.Div(w_offset * output_width);
phend = tmp_phend.val[1] > 0 ? tmp_phend.val[0] + 1 : tmp_phend.val[0];
pwend = tmp_pwend.val[1] > 0 ? tmp_pwend.val[0] + 1 : tmp_pwend.val[0];
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
auto ksize_w_divmod = divmods.ksize_w.Divmod(input_width);
auto ksize_h_divmod = divmods.ksize_h.Divmod(input_height);
auto tmp_width = ksize_w_divmod.val[1] > 0 ? ksize_w_divmod.val[0] + 1
: ksize_w_divmod.val[0];
auto tmp_height = ksize_h_divmod.val[1] > 0
? ksize_h_divmod.val[0] + 1
: ksize_h_divmod.val[0];
int pool_size = tmp_height * tmp_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                              : static_cast<T>(0);
          pool_process.compute(input,
                               output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
} else {
auto stride_height_div = divmods.stride_h.Div(h_offset - ksize_height);
auto stride_width_div = divmods.stride_w.Div(w_offset - ksize_width);
phstart = (h_offset < ksize_height) ? 0 : stride_height_div + 1;
pwstart = (w_offset < ksize_width) ? 0 : stride_width_div + 1;
phend = min(divmods.stride_h.Div(h_offset) + 1, output_height);
pwend = min(divmods.stride_w.Div(w_offset) + 1, output_width);
if (exclusive) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (hend - hstart) * (wend - wstart);
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                                : static_cast<T>(0);
            pool_process.compute(input,
                                 output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
} else {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int pool_size = ksize_height * ksize_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                                : static_cast<T>(0);
            pool_process.compute(input,
                                 output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
}
}
input_grad[index] = input_grad_data;
}
}
template <typename T>
__global__ void KernelMaxPool2DGrad(const int nthreads,
const T* input_data,
const T* output_data,
const T* output_grad,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
T* input_grad,
FastDivModForPooling divmods,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
channel_last,
divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset;
input_grad += input_offset;
int hstart = h_offset * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = w_offset * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
if (ele == input_data[input_data_idx]) {
maxIndex = input_data_idx;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
paddle::platform::CudaAtomicAdd(input_grad + maxIndex,
output_grad[index]);
}
}
}
template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_height = input_shape[2];
const int input_width = input_shape[3];
const int output_channels = output_shape[1];
const int output_height = output_shape[2];
const int output_width = output_shape[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
// backends::gpu::ChangeThreadNum(context, &thread_num);
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>(nthreads,
input,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_compute,
exclusive,
adaptive,
output);
}
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
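// Illustrative usage sketch (not part of the original source): average
// pooling with the functor defined below. `ctx`, `x` and `out` are
// hypothetical caller-provided objects.
//
//   phi::funcs::Pool2dFunctor<phi::GPUContext, phi::funcs::AvgPool<float>,
//                             float>
//       pool;
//   pool(ctx, x, /*ksize=*/{2, 2}, /*strides=*/{2, 2}, /*paddings=*/{0, 0},
//        /*exclusive=*/true, /*adaptive=*/false, &out,
//        phi::funcs::AvgPool<float>());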
template <typename PoolProcess, typename T>
class Pool2dFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_process,
exclusive,
adaptive,
output_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output->dims()[3] : output->dims()[1];
const int output_height =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_width =
channel_last ? output->dims()[2] : output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_process,
exclusive,
adaptive,
output_data,
channel_last);
}
};
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
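// Unlike the forward functor, the gradient functor below launches one thread
// per input element (nthreads = N * C * H * W) and sizes the launch with
// GetGpuLaunchConfig1D rather than a fixed 1024-thread configuration.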
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(input_channels,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height);
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(context, nthreads);
KernelPool2DGrad<T, PoolProcess><<<config.block_per_grid,
config.thread_per_block,
0,
context.stream()>>>(nthreads,
input_data,
output_data,
output_grad_data,
output_width,
output_height,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height,
padding_width,
padding_height,
pool_divmods,
pool_process,
exclusive,
adaptive,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(input_channels,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height);
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(context, nthreads);
KernelPool2DGrad<T, PoolProcess><<<config.block_per_grid,
config.thread_per_block,
0,
context.stream()>>>(nthreads,
input_data,
output_data,
output_grad_data,
output_width,
output_height,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height,
padding_width,
padding_height,
pool_divmods,
pool_process,
exclusive,
adaptive,
input_grad_data,
channel_last);
}
};
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
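// Max-pooling backward re-scans each window for the first input value equal
// to the stored forward output and scatters the gradient to that position
// with CudaAtomicAdd, so ties are resolved in favor of the earliest element
// in scan order.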
template <typename T>
class MaxPool2dGradFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
DenseTensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
input_grad_data,
pool_divmods);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
input_grad_data,
pool_divmods,
channel_last);
}
};
template class Pool2dDirectCUDAFunctor<MaxPool<float>, float>;
template class Pool2dDirectCUDAFunctor<AvgPool<float>, float>;
template class MaxPool2dGradFunctor<phi::GPUContext, float>;
template class MaxPool2dGradFunctor<phi::GPUContext, double>;
template class MaxPool2dGradFunctor<phi::GPUContext, dtype::float16>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<float>, float>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<float>, float>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<float>, float>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<float>, float>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<double>, double>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<double>, double>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<double>, double>;
template class Pool2dFunctor<phi::GPUContext,
MaxPool<dtype::float16>,
dtype::float16>;
template class Pool2dFunctor<phi::GPUContext,
AvgPool<dtype::float16>,
dtype::float16>;
template class Pool2dGradFunctor<phi::GPUContext,
MaxPoolGrad<dtype::float16>,
dtype::float16>;
template class Pool2dGradFunctor<phi::GPUContext,
AvgPoolGrad<dtype::float16>,
dtype::float16>;
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(const int nthreads,
const T* input_data,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) {
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else {
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
int input_data_stride;
if (!channel_last) { /* NCDHW */
input_data_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else { /* NDHWC */
input_data_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_data_stride;
T ele = pool_process.initial();
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
pool_process.compute(input_data[input_data_idx], &ele);
}
}
}
int pool_size = (exclusive || adaptive)
? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
template <typename T, typename PoolProcess>
__global__ void KernelPool3DGrad(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ output_data,
const T* __restrict__ output_grad,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, d_offset, c_offset, batch_idx, output_stride;
T input = static_cast<T>(0);
if (!channel_last) { /* "NCDHW" */
w_offset = index % input_width + padding_width;
h_offset = (index / input_width) % input_height + padding_height;
d_offset =
(index / input_width / input_height) % input_depth + padding_depth;
c_offset = (index / input_width / input_height / input_depth) % channels;
batch_idx = index / input_width / input_height / input_depth / channels;
output_stride = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
} else { /* "NDHWC" */
c_offset = index % channels;
w_offset = (index / channels) % input_width + padding_width;
h_offset =
(index / channels / input_width) % input_height + padding_height;
d_offset = (index / channels / input_width / input_height) % input_depth +
padding_depth;
batch_idx = index / channels / input_width / input_height / input_depth;
output_stride =
batch_idx * output_depth * output_height * output_width * channels;
}
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = AdaptStartIndex(d_offset, output_depth, input_depth);
pdend = AdaptEndIndex(d_offset, output_depth, input_depth);
phstart = AdaptStartIndex(h_offset, output_height, input_height);
phend = AdaptEndIndex(h_offset, output_height, input_height);
pwstart = AdaptStartIndex(w_offset, output_width, input_width);
pwend = AdaptEndIndex(w_offset, output_width, input_width);
} else {
pdstart = (d_offset < ksize_depth)
? 0
: (d_offset - ksize_depth) / stride_depth + 1;
phstart = (h_offset < ksize_height)
? 0
: (h_offset - ksize_height) / stride_height + 1;
pwstart = (w_offset < ksize_width)
? 0
: (w_offset - ksize_width) / stride_width + 1;
pdend = min((d_offset) / stride_depth + 1, output_depth);
phend = min((h_offset) / stride_height + 1, output_height);
pwend = min((w_offset) / stride_width + 1, output_width);
}
if (pool_process.use_x) {
input = input_data[index];
output_data += output_stride;
}
output_grad += output_stride;
T input_grad_data = static_cast<T>(0.0);
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int pool_size;
if (adaptive) {
pool_size =
static_cast<int>(
ceil(static_cast<double>(input_depth) / ksize_depth)) *
static_cast<int>(
ceil(static_cast<double>(input_height) / ksize_height)) *
static_cast<int>(
ceil(static_cast<double>(input_width) / ksize_width));
} else {
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
pool_size =
exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
}
int output_sub_idx =
channel_last
? ((pd * output_height + ph) * output_width + pw) * channels +
c_offset
: (pd * output_height + ph) * output_width + pw;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                              : static_cast<T>(0);
          pool_process.compute(input,
                               output_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
}
input_grad[index] = input_grad_data;
}
}
template <typename T>
__global__ void KernelMaxPool3DGrad(const int nthreads,
const T* input_data,
const T* output_data,
const T* output_grad,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
T* input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) { /*NCDHW*/
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else { /*NDHWC*/
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = output_data[index];
bool stop = false;
int maxIdx = -1;
int input_stride;
if (!channel_last) {
input_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else {
input_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_stride;
input_grad += input_stride;
for (int d = dstart; d < dend && !stop; ++d) {
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
if (ele == input_data[input_data_idx]) {
stop = true;
maxIdx = input_data_idx;
}
}
}
}
if (maxIdx != -1) {
// atomic add
paddle::platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
}
}
}
template <typename PoolProcess, typename T>
void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_depth = input_shape[2];
const int input_height = input_shape[3];
const int input_width = input_shape[4];
const int output_channels = output_shape[1];
const int output_depth = output_shape[2];
const int output_height = output_shape[3];
const int output_width = output_shape[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, stream>>>(nthreads,
input,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_compute,
exclusive,
adaptive,
output);
}
/*
 * Tensors are in NCDHW or NDHWC format.
 * Ksize and strides are three elements. These three elements represent
 * depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
 * depth_back, height_up, height_down, width_left and width_right,
 * respectively.
 */
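// Illustrative usage sketch (not part of the original source): 3-D average
// pooling with the functor defined below. `ctx`, `x` and `out` are
// hypothetical caller-provided objects.
//
//   phi::funcs::Pool3dFunctor<phi::GPUContext, phi::funcs::AvgPool<float>,
//                             float>
//       pool;
//   pool(ctx, x, /*ksize=*/{2, 2, 2}, /*strides=*/{2, 2, 2},
//        /*paddings=*/{0, 0, 0}, /*exclusive=*/true, /*adaptive=*/false,
//        &out, phi::funcs::AvgPool<float>());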
template <typename PoolProcess, class T>
class Pool3dFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
output_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output->dims()[4] : output->dims()[1];
const int output_depth =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_height =
channel_last ? output->dims()[2] : output->dims()[3];
const int output_width =
channel_last ? output->dims()[3] : output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
output_data,
channel_last);
}
};
/*
 * Tensors are in NCDHW or NDHWC format.
 * Ksize and strides are three elements. These three elements represent
 * depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
 * depth_back, height_up, height_down, width_left and width_right,
 * respectively.
 */
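// In the adaptive path of KernelPool3DGrad the pooling size is approximated
// per dimension as ceil(input_extent / ksize_extent); the non-adaptive path
// recomputes the exact window bounds for every contributing output element.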
template <typename PoolProcess, class T>
class Pool3dGradFunctor<phi::GPUContext, PoolProcess, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
input_grad_data,
channel_last); // add channel_last
}
};
/*
 * Tensors are in NCDHW or NDHWC format.
 * Ksize and strides are three elements. These three elements represent
 * depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
 * depth_back, height_up, height_down, width_left and width_right,
 * respectively.
 */
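// As in the 2-D case, the 3-D max-pool backward recovers the argmax by
// re-scanning the window and scatters with CudaAtomicAdd; the explicit
// instantiations below cover float, double and dtype::float16.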
template <class T>
class MaxPool3dGradFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
DenseTensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
output_data,
output_grad_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
input_grad_data,
channel_last); // add channel_last
}
};
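/*
 * Illustrative usage sketch (an editorial addition, not part of the original
 * source). The context `ctx` and the DenseTensors `x`, `y`, `dy` and `dx`
 * are hypothetical names; their construction is assumed to happen elsewhere,
 * and `dx` is expected to already carry the shape of `x`.
 *
 *   // NCDHW layout, 2x2x2 max pooling with stride 2 and no padding.
 *   phi::funcs::MaxPool3dGradFunctor<phi::GPUContext, float> pool3d_grad;
 *   pool3d_grad(ctx, x, y, dy, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, &dx);
 *
 *   // NDHWC layout goes through the overload that takes a data_format string.
 *   pool3d_grad(ctx, x, y, dy, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, "NDHWC", &dx);
 */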
template class Pool3dDirectCUDAFunctor<MaxPool<float>, float>;
template class Pool3dDirectCUDAFunctor<AvgPool<float>, float>;
template class MaxPool3dGradFunctor<phi::GPUContext, float>;
template class MaxPool3dGradFunctor<phi::GPUContext, double>;
template class MaxPool3dGradFunctor<phi::GPUContext, dtype::float16>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<float>, float>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<float>, float>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<float>, float>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<float>, float>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<double>, double>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<double>, double>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<double>, double>;
template class Pool3dFunctor<phi::GPUContext,
MaxPool<dtype::float16>,
dtype::float16>;
template class Pool3dFunctor<phi::GPUContext,
AvgPool<dtype::float16>,
dtype::float16>;
template class Pool3dGradFunctor<phi::GPUContext,
MaxPoolGrad<dtype::float16>,
dtype::float16>;
template class Pool3dGradFunctor<phi::GPUContext,
AvgPoolGrad<dtype::float16>,
dtype::float16>;
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(const int nthreads,
const T1* input_data,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
bool adaptive,
T1* output_data,
T2* mask_data,
FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
false,
divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * input_width + w;
if (ele < input_data[input_index]) {
max_index = input_index;
ele = input_data[input_index];
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
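/*
 * Editorial note on the adaptive branch above: AdaptStartIndex and
 * AdaptEndIndex are defined elsewhere in phi and are expected to split the
 * input extent into output_size nearly equal, possibly overlapping windows,
 * roughly start = floor(i * I / O) and end = ceil((i + 1) * I / O). For
 * example, with I = 5 input columns and O = 3 output columns the windows are
 * [0, 2), [1, 4) and [3, 5), so every input element is covered.
 */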
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(const int nthreads,
const T1* output_grad,
const T2* mask_data,
const int channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
bool adaptive,
T1* input_grad,
FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
false,
divmods,
0,
0,
output_width,
output_height,
&w_offset,
&h_offset,
&c_offset,
&output_offset);
mask_data += output_offset;
output_grad += output_offset;
if (adaptive) {
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
input_grad_data += output_grad[ph * output_width + pw];
}
}
input_grad[index] = input_grad_data;
}
}
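/*
 * Editorial note on KernelMaxPool2DWithIdxGrad above: in the non-adaptive
 * branch, output row ph reads input rows [ph * stride - pad,
 * ph * stride - pad + ksize), so the output rows whose windows can contain
 * input row h are
 *   phstart = (h + pad < ksize) ? 0 : (h + pad - ksize) / stride + 1
 *   phend   = min((h + pad) / stride + 1, output_height)
 * For example, with ksize = 3, stride = 2, pad = 1 and h = 4 this gives the
 * range [2, 3): only output row 2 (window [3, 6)) overlaps input row 4. The
 * same bounds are applied along the width dimension.
 */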
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T1* input_data = input.data<T1>();
T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = context.template Alloc<T2>(mask);
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
adaptive,
output_data,
mask_data,
pool_divmods);
}
};
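/*
 * Editorial note: the mask produced above stores, for every output element,
 * the flattened position h * input_width + w of the winning input value
 * within its own (n, c) plane, which is exactly the encoding
 * KernelMaxPool2DWithIdxGrad compares against when scattering gradients.
 * A minimal usage sketch (the names `ctx`, `x`, `out` and `mask` are
 * hypothetical; tensor setup is assumed to happen elsewhere):
 *
 *   phi::funcs::MaxPool2dWithIndexFunctor<phi::GPUContext, float, int> pool;
 *   pool(ctx, x, {2, 2}, {2, 2}, {0, 0}, false, &out, &mask);
 */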
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& output_grad,
const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_height = input_grad->dims()[2];
const int input_width = input_grad->dims()[3];
const int output_height = output_grad.dims()[2];
const int output_width = output_grad.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = context.template Alloc<T1>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, input_width, input_height);
KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads,
output_grad_data,
mask_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
adaptive,
input_grad_data,
pool_divmods);
}
};
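/*
 * Editorial note: the gradient functor above derives all shapes from
 * input_grad->dims() and output_grad.dims(), so input_grad must already
 * carry the forward input's NCHW dims before the call, and mask must be the
 * tensor produced by the matching MaxPool2dWithIndexFunctor run. A hedged
 * sketch, continuing the hypothetical names used above:
 *
 *   phi::funcs::MaxPool2dWithIndexGradFunctor<phi::GPUContext, float, int> pool_grad;
 *   pool_grad(ctx, out_grad, mask, {2, 2}, {2, 2}, {0, 0}, false, &x_grad);
 */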
template class MaxPool2dWithIndexFunctor<phi::GPUContext, float, int>;
template class MaxPool2dWithIndexGradFunctor<phi::GPUContext, float, int>;
template class MaxPool2dWithIndexFunctor<phi::GPUContext, double, int>;
template class MaxPool2dWithIndexGradFunctor<phi::GPUContext, double, int>;
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(const int nthreads,
const T1* input_data,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
bool adaptive,
T1* output_data,
T2* mask_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (ele < input_data[(d * input_height + h) * input_width + w]) {
max_index = (d * input_height + h) * input_width + w;
ele = input_data[max_index];
}
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(const int nthreads,
const T1* output_grad,
const T2* mask,
const int channels,
const int input_depth,
const int input_height,
const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
bool adaptive,
T1* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int d_offset = (index / input_width / input_height) % input_depth;
int c_offset =
(index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = d_offset * output_depth / input_depth;
pdend =
min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
pdstart =
(d_offset + padding_depth < ksize_depth)
? 0
: (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth);
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
mask += output_idx;
output_grad += output_idx;
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[(pd * output_height + ph) * output_width + pw] ==
input_current_feature_map_idx)
input_grad_data +=
output_grad[(pd * output_height + ph) * output_width + pw];
}
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* input_data = input.data<T1>();
T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = context.template Alloc<T2>(mask);
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads,
input_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
adaptive,
output_data,
mask_data);
}
};
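/*
 * Editorial note: as in the 2D case, the 3D mask stores the flattened index
 * (d * input_height + h) * input_width + w of the maximum within its own
 * (n, c) input volume, and KernelMaxPool3DWithIdxGrad matches against the
 * same encoding when routing gradients back to the input.
 */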
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<phi::GPUContext, T1, T2> {
public:
void operator()(const phi::GPUContext& context,
const DenseTensor& output_grad,
const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_depth = input_grad->dims()[2];
const int input_height = input_grad->dims()[3];
const int input_width = input_grad->dims()[4];
const int output_depth = output_grad.dims()[2];
const int output_height = output_grad.dims()[3];
const int output_width = output_grad.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* output_grad_data = output_grad.data<T1>();
const T2* mask_data = mask.data<T2>();
T1* input_grad_data = context.template Alloc<T1>(input_grad);
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads,
output_grad_data,
mask_data,
input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
adaptive,
input_grad_data);
}
};
template class MaxPool3dWithIndexFunctor<phi::GPUContext, float, int>;
template class MaxPool3dWithIndexGradFunctor<phi::GPUContext, float, int>;
template class MaxPool3dWithIndexFunctor<phi::GPUContext, double, int>;
template class MaxPool3dWithIndexGradFunctor<phi::GPUContext, double, int>;
} // namespace funcs
} // namespace phi
|
29dc4a783a26237aba51c991aa9eda285f18b80d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_updater_cuda_fermi.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "space_filling_curve.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref;
texture<float, hipTextureType1D, hipReadModeElementType> output_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
namespace nnforge
{
namespace cuda
{
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_upd_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * window_width * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * window_width);
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
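/*
 * Editorial note on the updater kernels above and below: each thread
 * accumulates BLOCK_SIZE consecutive output columns for FEATURE_MAP_BLOCK_SIZE
 * consecutive output feature maps in registers. When the input feature maps
 * are processed in several groups (single_input_feature_map_group == false),
 * different threads contribute to the same output elements, so partial
 * results are combined with atomicAdd; with a single group each thread can
 * store its bias-initialised sums directly. The *_exact_* variant below is
 * the same kernel with the window width fixed at compile time so the
 * innermost loop fully unrolls.
 */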
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_exact_upd_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * WINDOW_WIDTH * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * WINDOW_WIDTH);
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
extern __shared__ float arr_sh[];
__global__ void convolution_3d_update_biases_upd_kernel_fermi(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict training_speed,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
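// Editorial note: the reduction below is the classic Fermi-era
// warp-synchronous pattern. Every thread has accumulated a strided slice of
// the feature map into `sum`; the partial sums are staged in shared memory
// and folded within each 32-thread warp (hence the volatile pointer and the
// absence of __syncthreads()), after which lane 0 of every warp atomically
// adds its warp total, scaled by the training speed, into the bias.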
volatile float * arr = arr_sh;
arr[thread_id] = sum;
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
if (lane_id < tx)
arr[thread_id] += arr[thread_id + tx];
}
sum = arr[thread_id];
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_training_speed_val = training_speed[offset];
atomicAdd(biases + offset, sum * current_training_speed_val);
}
}
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_upd_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map);
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
current_weights++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch(output_tex_ref, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * current_weights[weight_offsets[i]];
}
}
current_weights++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_exact_upd_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map);
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
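// Editorial note: the bitmask built below records, for each offset i in
// [0, BLOCK_SIZE + WINDOW_WIDTH - 1), whether output column (x - i) lies
// inside the output row, so the inner loops can test validity with a single
// bit check instead of repeating the two range comparisons.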
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
current_weights++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<bool single_output_y_group>
__global__ void convolution_3d_update_weights_upd_kernel_fermi(
float * __restrict weights,
const float * __restrict output_errors,
const float * __restrict training_speed,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count) && (weight_x < window_width);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
const float * current_output_errors = output_errors + (int)((((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width);
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
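// Editorial note: input_buf is a register-resident sliding window of
// WINDOW_WIDTH_LOCAL consecutive input samples. It is primed with the first
// WINDOW_WIDTH_LOCAL - 1 texels, and each output column then shifts the
// buffer left by one slot and fetches exactly one new texel, so every input
// value is read from the texture only once per output row.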
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
current_output_errors++;
input_elem_id++;
}
input_elem_id += window_width - WINDOW_WIDTH_LOCAL;
}
current_output_errors += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
template<int WINDOW_WIDTH, bool single_output_y_group>
__global__ void convolution_3d_update_weights_exact_upd_kernel_fermi(
float * __restrict weights,
const float * __restrict output_errors,
const float * __restrict training_speed,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
const float * current_output_errors = output_errors + (int)((((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width);
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
current_output_errors++;
input_elem_id++;
}
}
current_output_errors += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
convolution_3d_layer_updater_cuda_fermi::convolution_3d_layer_updater_cuda_fermi()
{
input_tex_ref.addressMode[0] = hipAddressModeBorder;
input_tex_ref.normalized = false;
output_tex_ref.addressMode[0] = hipAddressModeBorder;
output_tex_ref.normalized = false;
}
convolution_3d_layer_updater_cuda_fermi::~convolution_3d_layer_updater_cuda_fermi()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
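// Editorial note: the launch_* macros below turn the runtime window width and
// block size into compile-time template arguments through nested switches, so
// every instantiated kernel can fully unroll its innermost loops. Window
// widths up to MAX_WINDOW_WIDTH presumably take the *_exact_* path, while
// wider windows would fall back to the generic kernels that keep window_width
// as a runtime parameter.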
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
break; \
};
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
break; \
};
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_tex_upd_kernel_fermi<block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_kernel(block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1, single_input_feature_map_group); \
break; \
case 2: \
launch_kernel_const(2, single_input_feature_map_group); \
break; \
case 3: \
launch_kernel_const(3, single_input_feature_map_group); \
break; \
case 4: \
launch_kernel_const(4, single_input_feature_map_group); \
break; \
case 5: \
launch_kernel_const(5, single_input_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_deriviative_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *data[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
break; \
};
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_deriviative_tex_upd_kernel_fermi<block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *data[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_kernel_const(2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_kernel_const(3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_kernel_const(4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_kernel_const(5, single_output_feature_map_group); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_y_group_const) \
hipLaunchKernelGGL(( convolution_3d_update_weights_exact_upd_kernel_fermi<window_width_const, single_output_y_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], *output_errors_buffer, *training_speed[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
#define launch_update_weights_exact_kernel(window_width, single_output_y_group_const) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1, single_output_y_group_const); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2, single_output_y_group_const); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3, single_output_y_group_const); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4, single_output_y_group_const); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5, single_output_y_group_const); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6, single_output_y_group_const); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7, single_output_y_group_const); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8, single_output_y_group_const); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9, single_output_y_group_const); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10, single_output_y_group_const); \
break; \
};
#define launch_update_weights_kernel_const(single_output_y_group_const) \
hipLaunchKernelGGL(( convolution_3d_update_weights_upd_kernel_fermi<single_output_y_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], *output_errors_buffer, *training_speed[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
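// Forward pass: binds the input neurons to a 1D texture, zeroes the output buffer when the
// input feature maps are processed in more than one group (the kernels then accumulate with
// atomicAdd), and dispatches the width-specialized kernel when window_sizes[0] <= MAX_WINDOW_WIDTH,
// otherwise the generic one.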
void convolution_3d_layer_updater_cuda_fermi::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
size_t texture_offset;
cuda_safe_call(hipBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
texture_offset /= sizeof(float);
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * forward_input_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[0]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
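// Backward pass: requires different_input and backprop_required; binds the output errors to a
// texture, zeroes the input errors when the output feature maps are split into several groups,
// and dispatches the width-specialized or generic backprop kernel.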
void convolution_3d_layer_updater_cuda_fermi::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not configured to do backprop but requested to");
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * backward_output_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
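// Weight update: first launches the bias-update reduction kernel, then binds the input neurons
// to a texture and runs the width-specialized weight-update kernel when the window fits in a
// single x block, otherwise the generic one.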
void convolution_3d_layer_updater_cuda_fermi::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int smem_size = threadblock_size * sizeof(float);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
hipLaunchKernelGGL(( convolution_3d_update_biases_upd_kernel_fermi), dim3(grid_size), dim3(block_size), smem_size, stream_id,
*data[1],
*output_errors_buffer,
*training_speed[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
size_t texture_offset;
cuda_safe_call(hipBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
texture_offset /= sizeof(float);
int packed_config_count = window_sizes[1] * window_sizes[2] * updater_output_z_group_count * updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[1]);
// Update weights
{
if (updater_window_x_block_count == 1)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (updater_output_z_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
entry_count);
if (updater_output_z_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
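// Splits a line of `width` elements into the fewest blocks of at most MAX_BLOCK_SIZE and
// returns the resulting (near-uniform) block size.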
int convolution_3d_layer_updater_cuda_fermi::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
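// Caches the window sizes from the layer schema and derives the block sizes/counts and
// feature-map blocking used by the forward, backward and weight-update kernels.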
void convolution_3d_layer_updater_cuda_fermi::updater_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 3> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
space_filling_curve<3>::fill_pattern(size_list, updater_config_ordered_list1);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_updater_cuda_fermi::is_in_place_backprop() const
{
return false;
}
std::vector<unsigned int> convolution_3d_layer_updater_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
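// Chooses a threadblock size for the bias-update reduction: the neuron count rounded up to a
// multiple of the warp size (32), split into roughly 128-thread chunks for larger feature maps.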
int convolution_3d_layer_updater_cuda_fermi::get_threadblock_size_biases(int output_neuron_count)
{
int threadblock_size;
if (output_neuron_count < 128)
{
threadblock_size = (output_neuron_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (output_neuron_count + 128 - 1) / 128;
threadblock_size = (output_neuron_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
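// Sizes of the device buffers that hold the packed task lists for the forward, weight-update
// and (if backprop is required) backward kernels.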
std::vector<size_t> convolution_3d_layer_updater_cuda_fermi::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<5>) * forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<5>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
res.push_back(sizeof(packed_config<5>) * backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
return res;
}
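// Builds the forward, weight-update and backward task lists (packed_config<5> entries
// enumerating spatial positions and feature-map/group indices) on the host and copies them to
// the corresponding additional device buffers.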
void convolution_3d_layer_updater_cuda_fermi::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
{
new_elem.set_val(4, input_feature_map_group_id * forward_input_feature_map_group_size);
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(3, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < forward_x_block_count; ++x)
{
new_elem.set_val(0, x * forward_x_block_size);
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), hipMemcpyHostToDevice));
}
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(std::vector<std::tr1::array<int, 2> >::const_iterator it2 = updater_config_ordered_list2.begin(); it2 != updater_config_ordered_list2.end(); ++it2)
{
new_elem.set_val(3, it2->at(0) * FEATURE_MAP_BLOCK_SIZE);
new_elem.set_val(4, it2->at(1));
for(std::vector<std::tr1::array<int, 3> >::const_iterator it1 = updater_config_ordered_list1.begin(); it1 != updater_config_ordered_list1.end(); ++it1)
{
new_elem.set_val(0, it1->at(0));
new_elem.set_val(1, it1->at(1));
new_elem.set_val(2, it1->at(2));
task_list.push_back(new_elem);
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), hipMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
{
new_elem.set_val(4, output_feature_map_group_id * backward_output_feature_map_group_size);
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(3, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < backward_x_block_count; ++x)
{
new_elem.set_val(0, x * backward_x_block_size + (backward_x_block_size - 1));
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), hipMemcpyHostToDevice));
}
}
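// Splits the input feature maps (forward), the output z range (weight update) and the output
// feature maps (backward) into groups via cuda_util::get_group_count, based on the amount of
// work at the given maximum entry count, and rebuilds the weight-update ordering.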
void convolution_3d_layer_updater_cuda_fermi::set_max_entry_count(unsigned int max_entry_count)
{
forward_input_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * max_entry_count,
input_configuration_specific.feature_map_count);
forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
updater_output_z_group_count = cuda_util::get_group_count(
*cuda_config,
updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * max_entry_count * updater_window_x_block_count * window_sizes[1] * window_sizes[2],
output_configuration_specific.dimension_sizes[2]);
updater_output_z_group_size = (output_configuration_specific.dimension_sizes[2] + updater_output_z_group_count - 1) / updater_output_z_group_count;
{
std::tr1::array<int, 2> size_list;
size_list[0] = updater_output_feature_map_block_count;
size_list[1] = updater_output_z_group_count;
space_filling_curve<2>::fill_pattern(size_list, updater_config_ordered_list2);
}
if (backprop_required)
{
backward_output_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * max_entry_count,
output_configuration_specific.feature_map_count);
backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
}
}
}
}
| 29dc4a783a26237aba51c991aa9eda285f18b80d.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_updater_cuda_fermi.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "space_filling_curve.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref;
texture<float, cudaTextureType1D, cudaReadModeElementType> output_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
namespace nnforge
{
namespace cuda
{
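// Generic forward kernel: each thread computes a BLOCK_SIZE-wide strip of output columns for
// up to FEATURE_MAP_BLOCK_SIZE output feature maps, reading the input through a 1D texture;
// partial results are combined with atomicAdd when the input feature maps are processed in
// more than one group.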
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_upd_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * window_width * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * window_width);
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
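// Width-specialized forward kernel: identical to the generic version above, but the window
// width is a template parameter so the innermost x loop is fully unrolled.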
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_exact_upd_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * WINDOW_WIDTH * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * WINDOW_WIDTH);
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
extern __shared__ float arr_sh[];
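// Bias update kernel: one threadblock per (entry, output feature map); each thread accumulates
// a strided share of the output errors, warps reduce their partial sums in shared memory, and
// lane 0 of every warp atomically adds its sum, scaled by the training speed, to the bias.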
__global__ void convolution_3d_update_biases_upd_kernel_fermi(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict training_speed,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
volatile float * arr = arr_sh;
arr[thread_id] = sum;
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
if (lane_id < tx)
arr[thread_id] += arr[thread_id + tx];
}
sum = arr[thread_id];
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_training_speed_val = training_speed[offset];
atomicAdd(biases + offset, sum * current_training_speed_val);
}
}
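// Generic backward kernel: computes BLOCK_SIZE-wide strips of input errors by convolving the
// output errors (read through a texture, with bounds masking) with the weights; results are
// written directly or accumulated with atomicAdd depending on the output feature-map grouping.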
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_upd_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map);
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
current_weights++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch(output_tex_ref, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * current_weights[weight_offsets[i]];
}
}
current_weights++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
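// Width-specialized backward kernel: same as the generic version, with the window width known
// at compile time and the x-bounds test precomputed as a bit mask.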
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_exact_upd_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map);
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
current_weights++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
current_weights += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
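// Generic weight-update kernel: each thread accumulates the gradient for a WINDOW_WIDTH_LOCAL-wide
// slice of weights across the output plane (the z range being split between output_z_group_count
// groups) and applies it scaled by the per-weight training speed, with atomicAdd when more than
// one group contributes to the same weights.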
template<bool single_output_y_group>
__global__ void convolution_3d_update_weights_upd_kernel_fermi(
float * __restrict weights,
const float * __restrict output_errors,
const float * __restrict training_speed,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count) && (weight_x < window_width);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
const float * current_output_errors = output_errors + (int)((((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width);
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
current_output_errors++;
input_elem_id++;
}
input_elem_id += window_width - WINDOW_WIDTH_LOCAL;
}
current_output_errors += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
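// Width-specialized weight-update kernel: used when the window width does not exceed
// MAX_WINDOW_WIDTH, so each thread covers the whole window width and the x loops are fully unrolled.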
template<int WINDOW_WIDTH, bool single_output_y_group>
__global__ void convolution_3d_update_weights_exact_upd_kernel_fermi(
float * __restrict weights,
const float * __restrict output_errors,
const float * __restrict training_speed,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
const float * current_output_errors = output_errors + (int)((((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width);
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
current_output_errors++;
input_elem_id++;
}
}
current_output_errors += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
convolution_3d_layer_updater_cuda_fermi::convolution_3d_layer_updater_cuda_fermi()
{
input_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_tex_ref.normalized = false;
output_tex_ref.addressMode[0] = cudaAddressModeBorder;
output_tex_ref.normalized = false;
}
convolution_3d_layer_updater_cuda_fermi::~convolution_3d_layer_updater_cuda_fermi()
{
}
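// MAX_BLOCK_SIZE and MAX_WINDOW_WIDTH bound the template specializations generated by the
// launch_* macros below, which convert the runtime block size and window width into template
// arguments of the kernels defined above.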
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
convolution_3d_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
break; \
};
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
break; \
};
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
convolution_3d_tex_upd_kernel_fermi<block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, *data[0], *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_kernel(block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1, single_input_feature_map_group); \
break; \
case 2: \
launch_kernel_const(2, single_input_feature_map_group); \
break; \
case 3: \
launch_kernel_const(3, single_input_feature_map_group); \
break; \
case 4: \
launch_kernel_const(4, single_input_feature_map_group); \
break; \
case 5: \
launch_kernel_const(5, single_input_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
convolution_3d_deriviative_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, *data[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
break; \
};
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
convolution_3d_deriviative_tex_upd_kernel_fermi<block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, *data[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_kernel_const(2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_kernel_const(3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_kernel_const(4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_kernel_const(5, single_output_feature_map_group); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_y_group_const) \
convolution_3d_update_weights_exact_upd_kernel_fermi<window_width_const, single_output_y_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], *output_errors_buffer, *training_speed[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
#define launch_update_weights_exact_kernel(window_width, single_output_y_group_const) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1, single_output_y_group_const); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2, single_output_y_group_const); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3, single_output_y_group_const); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4, single_output_y_group_const); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5, single_output_y_group_const); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6, single_output_y_group_const); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7, single_output_y_group_const); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8, single_output_y_group_const); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9, single_output_y_group_const); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10, single_output_y_group_const); \
break; \
};
#define launch_update_weights_kernel_const(single_output_y_group_const) \
convolution_3d_update_weights_upd_kernel_fermi<single_output_y_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], *output_errors_buffer, *training_speed[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
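// The launch_* macro families above turn runtime parameters (window width, block
// size, single-group flag) into compile-time template arguments through nested
// switch statements, so every supported configuration maps to a fully specialized
// kernel instantiation.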
void convolution_3d_layer_updater_cuda_fermi::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
size_t texture_offset;
cuda_safe_call(cudaBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
texture_offset /= sizeof(float);
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * forward_input_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[0]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
void convolution_3d_layer_updater_cuda_fermi::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not configured to do backprop but requested to");
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * backward_output_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
void convolution_3d_layer_updater_cuda_fermi::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int smem_size = threadblock_size * sizeof(float);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
convolution_3d_update_biases_upd_kernel_fermi<<<grid_size, block_size, smem_size, stream_id>>>(
*data[1],
*output_errors_buffer,
*training_speed[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
size_t texture_offset;
cuda_safe_call(cudaBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
texture_offset /= sizeof(float);
int packed_config_count = window_sizes[1] * window_sizes[2] * updater_output_z_group_count * updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[1]);
// Update weights
{
if (updater_window_x_block_count == 1)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (updater_output_z_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
entry_count);
if (updater_output_z_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
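// get_block_size splits a dimension of the given width into the fewest blocks of at
// most MAX_BLOCK_SIZE elements and returns the resulting (nearly balanced) block size.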
int convolution_3d_layer_updater_cuda_fermi::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
void convolution_3d_layer_updater_cuda_fermi::updater_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 3> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
space_filling_curve<3>::fill_pattern(size_list, updater_config_ordered_list1);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_updater_cuda_fermi::is_in_place_backprop() const
{
return false;
}
std::vector<unsigned int> convolution_3d_layer_updater_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
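// get_threadblock_size_biases picks the threadblock size for the bias-update kernel:
// a multiple of the 32-thread warp size, capped at roughly 128 threads, sized so a
// block can cover output_neuron_count in a small number of iterations.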
int convolution_3d_layer_updater_cuda_fermi::get_threadblock_size_biases(int output_neuron_count)
{
int threadblock_size;
if (output_neuron_count < 128)
{
threadblock_size = (output_neuron_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (output_neuron_count + 128 - 1) / 128;
threadblock_size = (output_neuron_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
std::vector<size_t> convolution_3d_layer_updater_cuda_fermi::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<5>) * forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<5>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
res.push_back(sizeof(packed_config<5>) * backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
return res;
}
void convolution_3d_layer_updater_cuda_fermi::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
{
new_elem.set_val(4, input_feature_map_group_id * forward_input_feature_map_group_size);
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(3, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < forward_x_block_count; ++x)
{
new_elem.set_val(0, x * forward_x_block_size);
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), cudaMemcpyHostToDevice));
}
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(std::vector<std::tr1::array<int, 2> >::const_iterator it2 = updater_config_ordered_list2.begin(); it2 != updater_config_ordered_list2.end(); ++it2)
{
new_elem.set_val(3, it2->at(0) * FEATURE_MAP_BLOCK_SIZE);
new_elem.set_val(4, it2->at(1));
for(std::vector<std::tr1::array<int, 3> >::const_iterator it1 = updater_config_ordered_list1.begin(); it1 != updater_config_ordered_list1.end(); ++it1)
{
new_elem.set_val(0, it1->at(0));
new_elem.set_val(1, it1->at(1));
new_elem.set_val(2, it1->at(2));
task_list.push_back(new_elem);
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), cudaMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
{
new_elem.set_val(4, output_feature_map_group_id * backward_output_feature_map_group_size);
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(3, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < backward_x_block_count; ++x)
{
new_elem.set_val(0, x * backward_x_block_size + (backward_x_block_size - 1));
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), cudaMemcpyHostToDevice));
}
}
void convolution_3d_layer_updater_cuda_fermi::set_max_entry_count(unsigned int max_entry_count)
{
forward_input_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * max_entry_count,
input_configuration_specific.feature_map_count);
forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
updater_output_z_group_count = cuda_util::get_group_count(
*cuda_config,
updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * max_entry_count * updater_window_x_block_count * window_sizes[1] * window_sizes[2],
output_configuration_specific.dimension_sizes[2]);
updater_output_z_group_size = (output_configuration_specific.dimension_sizes[2] + updater_output_z_group_count - 1) / updater_output_z_group_count;
{
std::tr1::array<int, 2> size_list;
size_list[0] = updater_output_feature_map_block_count;
size_list[1] = updater_output_z_group_count;
space_filling_curve<2>::fill_pattern(size_list, updater_config_ordered_list2);
}
if (backprop_required)
{
backward_output_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * max_entry_count,
output_configuration_specific.feature_map_count);
backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
}
}
}
}
|
a34763a3989f47934bd5a259baca544b27b6baf8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_feedforward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
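// Benchmark sweep: for each matrix size in matrices_ (count taken from argv[1]) and
// each launch shape in blocks_, the kernel is launched once plus 10 warm-up runs and
// then timed over 1000 launches; XSIZE/YSIZE are rounded up to multiples of the block
// shape when deriving the grid dimensions.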
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int layer_id = 1;
int *l = NULL;
hipMalloc(&l, XSIZE*YSIZE);
int *s = NULL;
hipMalloc(&s, XSIZE*YSIZE);
int *sw = NULL;
hipMalloc(&sw, XSIZE*YSIZE);
float *z_arr = NULL;
hipMalloc(&z_arr, XSIZE*YSIZE);
float *a_arr = NULL;
hipMalloc(&a_arr, XSIZE*YSIZE);
float *w_arr = NULL;
hipMalloc(&w_arr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernel_feedforward), dim3(gridBlock), dim3(threadBlock), 0, 0, layer_id, l, s, sw, z_arr, a_arr, w_arr);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_feedforward), dim3(gridBlock), dim3(threadBlock), 0, 0, layer_id, l, s, sw, z_arr, a_arr, w_arr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_feedforward), dim3(gridBlock), dim3(threadBlock), 0, 0, layer_id, l, s, sw, z_arr, a_arr, w_arr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a34763a3989f47934bd5a259baca544b27b6baf8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_feedforward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int layer_id = 1;
int *l = NULL;
cudaMalloc(&l, XSIZE*YSIZE);
int *s = NULL;
cudaMalloc(&s, XSIZE*YSIZE);
int *sw = NULL;
cudaMalloc(&sw, XSIZE*YSIZE);
float *z_arr = NULL;
cudaMalloc(&z_arr, XSIZE*YSIZE);
float *a_arr = NULL;
cudaMalloc(&a_arr, XSIZE*YSIZE);
float *w_arr = NULL;
cudaMalloc(&w_arr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_feedforward<<<gridBlock,threadBlock>>>(layer_id,l,s,sw,z_arr,a_arr,w_arr);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_feedforward<<<gridBlock,threadBlock>>>(layer_id,l,s,sw,z_arr,a_arr,w_arr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_feedforward<<<gridBlock,threadBlock>>>(layer_id,l,s,sw,z_arr,a_arr,w_arr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a520760e73b3647a5c134c0713d14ed1861fed79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#define DEBUG_ASSERTS 0
#define DEBUG_PRINTS 0
#include <poggers/allocators/cms.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#define stack_bytes 32768
#define MEGABYTE 1024*1024
#define GIGABYTE 1024*MEGABYTE
using shibboleth = poggers::allocators::shibboleth<stack_bytes, 10, 4>;
__global__ void cms_single_threaded(shibboleth * cms){
uint64_t test_size = 4096;
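// Test flow: grab an array of test_size pointers from the allocator, then test_size
// 4-byte blocks, write and verify an index pattern through them, and hand everything
// back with cms_free.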
uint ** address_array = (uint **) cms->cms_malloc(4096*sizeof(uint *));
if (address_array == nullptr){
printf("address_array malloc failed\n");
asm("trap;");
}
for (uint64_t i = 0; i < test_size; i++){
address_array[i] = (uint *) cms->cms_malloc(4);
if (address_array[i] == nullptr){
printf("Could not allocate %llu\n", i);
asm("trap;");
}
}
for (uint64_t i = 0; i < test_size; i++){
address_array[i][0] = i;
}
for (uint64_t i = 0; i < test_size; i++){
if (address_array[i][0] != i){
printf("Memory corrupted at %llu, shows %llu instead of %llu\n", i, address_array[i][0], i);
asm("trap;");
}
}
for (uint64_t i = 0; i< test_size; i++){
cms->cms_free(address_array[i]);
}
cms->cms_free(address_array);
}
int main(int argc, char** argv) {
//allocate
//const uint64_t meg = 1024*1024;
const uint64_t bytes_in_use = 8*MEGABYTE;
shibboleth * allocator = shibboleth::init(bytes_in_use);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cms_single_threaded), dim3(1),dim3(1), 0, 0, allocator);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cms_single_threaded), dim3(1), dim3(100), 0, 0, allocator);
shibboleth::free_cms_allocator(allocator);
hipDeviceSynchronize();
return 0;
}
| a520760e73b3647a5c134c0713d14ed1861fed79.cu | /*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#define DEBUG_ASSERTS 0
#define DEBUG_PRINTS 0
#include <poggers/allocators/cms.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#define stack_bytes 32768
#define MEGABYTE 1024*1024
#define GIGABYTE 1024*MEGABYTE
using shibboleth = poggers::allocators::shibboleth<stack_bytes, 10, 4>;
__global__ void cms_single_threaded(shibboleth * cms){
uint64_t test_size = 4096;
uint ** address_array = (uint **) cms->cms_malloc(4096*sizeof(uint *));
if (address_array == nullptr){
printf("address_array malloc failed\n");
asm("trap;");
}
for (uint64_t i = 0; i < test_size; i++){
address_array[i] = (uint *) cms->cms_malloc(4);
if (address_array[i] == nullptr){
printf("Could not allocate %llu\n", i);
asm("trap;");
}
}
for (uint64_t i = 0; i < test_size; i++){
address_array[i][0] = i;
}
for (uint64_t i = 0; i < test_size; i++){
if (address_array[i][0] != i){
printf("Memory corrupted at %llu, shows %llu instead of %llu\n", i, address_array[i][0], i);
asm("trap;");
}
}
for (uint64_t i = 0; i< test_size; i++){
cms->cms_free(address_array[i]);
}
cms->cms_free(address_array);
}
int main(int argc, char** argv) {
//allocate
//const uint64_t meg = 1024*1024;
const uint64_t bytes_in_use = 8*MEGABYTE;
shibboleth * allocator = shibboleth::init(bytes_in_use);
cudaDeviceSynchronize();
cms_single_threaded<<<1,1>>>(allocator);
cudaDeviceSynchronize();
cms_single_threaded<<<1, 100>>>(allocator);
shibboleth::free_cms_allocator(allocator);
cudaDeviceSynchronize();
return 0;
}
|
d1ec045b61ab86dfcc9251795d982d6bdb6a57ef.hip | // !!! This is a file automatically generated by hipify!!!
#define _CRT_SECURE_NO_DEPRECATE
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define ITERATIONS 10000000
#define BLOCK_SIZE 32
// error handling for the CUDA API functions
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err), file, line);
system("pause");
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__
void spmvAsync(double *y,
const double *A,
const int *IA,
const int *JA,
const int M,
const double *x,
const int offset)
{
__shared__ float t_sum[BLOCK_SIZE]; // thread sum
int j;
int t_id = offset + threadIdx.x + blockDim.x * blockIdx.x; // thread id
int row = t_id / 32; // one warp per row
int t_warp = t_id & 31; // thread number within a given warp
// boundary condition
if (row < M){
// compute running sum per thread in warp
t_sum[threadIdx.x] = 0;
for (j = IA[row] + t_warp; j < IA[row+1]; j += 32)
t_sum[threadIdx.x] += A[j] * x[JA[j]];
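// The reduction below omits __syncthreads(): with BLOCK_SIZE == 32 each block is a
// single warp, so the code relies on implicit warp-synchronous execution (on GPUs
// with independent thread scheduling a __syncwarp() between steps would be needed).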
// Parallel reduction of result in shared memory for one warp
if (t_warp < 16) t_sum[threadIdx.x] += t_sum[threadIdx.x+16];
if (t_warp < 8) t_sum[threadIdx.x] += t_sum[threadIdx.x+8];
if (t_warp < 4) t_sum[threadIdx.x] += t_sum[threadIdx.x+4];
if (t_warp < 2) t_sum[threadIdx.x] += t_sum[threadIdx.x+2];
if (t_warp < 1) t_sum[threadIdx.x] += t_sum[threadIdx.x+1];
// first thread within warp contains desired y[row] result so write it to y
if (t_warp == 0)
y[row] = t_sum[threadIdx.x];
}
}
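// cootocsr builds the CSR row-offset array from row-sorted COO row indices.
// Sketch, assuming 0-based rows: for 5 entries with row indices {0,0,1,3,3} it
// produces rowoff = {0,2,3,3,5}; rowoff[r+1] - rowoff[r] is the number of
// non-zeros in row r, and empty rows simply repeat the running count.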
void cootocsr(int *rowoff, int *row, int size) {
rowoff[0] = 0;
int prev = 0, accu = 1, j = 1;
for (int i = 1; i < size; i++) {
if (row[i] - row[prev] > 1) {
for (int k = 0; k < row[i] - row[prev]; k++) {
rowoff[j++] = accu;
}
prev = i;
}
else
if (row[prev] != row[i]) {
rowoff[j++] = accu;
prev = i;
}
accu += 1;
}
rowoff[j] = accu;
}
int main() {
const int nStreams = 4;
//************ 1) Read the dataset files ************//
//FILE *pToMFile = fopen("mat5_5.txt", "r"); //5 5 13
//FILE *pToMFile = fopen("mat20_20.txt", "r"); //20 20 34
//FILE *pToMFile = fopen("cop20k_A.mtx", "r"); //121192 121192 1362087
//FILE *pToMFile = fopen("cant.mtx", "r"); //62451 62451 2034917
//FILE *pToMFile = fopen("consph.mtx", "r"); //83334 83334 3046907
//FILE *pToMFile = fopen("mac_econ_fwd500.mtx", "r"); //206500 206500 1273389
//FILE *pToMFile = fopen("mc2depi.mtx", "r"); //525825 525825 2100225
//FILE *pToMFile = fopen("pdb1HYS.mtx", "r"); //36417 36417 2190591
//FILE *pToMFile = fopen("pwtk.mtx", "r"); //217918 217918 5926171
//FILE *pToMFile = fopen("scircuit.mtx", "r"); //170998 170998 958936
//FILE *pToMFile = fopen("shipsec1.mtx", "r"); //140874 140874 3977139
FILE *pToMFile = fopen("webbase-1M.mtx", "r"); //1000005 1000005 3105536
//************ 2) Extract the vector/square-matrix size and the number of non-zero elements (NNZ) ************//
int matsize, veclen,temp1;
fscanf(pToMFile, "%d", &veclen); //vector size
fscanf(pToMFile, "%d", &temp1); //skip
fscanf(pToMFile, "%d", &matsize); //NNZ count
//************ 3) Create host vectors in pinned memory to capture the dataset file contents ************//
int mintsize = matsize * sizeof(int);
double mdoublesize = matsize * sizeof(double);
int *h_row;
HANDLE_ERROR(hipHostMalloc((void **)&h_row, mintsize));
double *h_mvalue;
HANDLE_ERROR(hipHostMalloc((void **)&h_mvalue, mdoublesize));
//int *h_col = (int *)malloc(mintsize);
int *h_col;
HANDLE_ERROR(hipHostMalloc((void **)&h_col, mintsize));
//************ 4) Read the sparse matrix elements ************//
for (int i = 0; i < matsize; i++)
{
fscanf(pToMFile, "%d", &h_col[i]);
fscanf(pToMFile, "%d", &h_row[i]);
fscanf(pToMFile, "%lf", &h_mvalue[i]);
}
fclose(pToMFile);
//************ 5) Create and populate the vector in pinned memory ************//
int vecbytes = veclen * sizeof(double);
double *h_vec;
HANDLE_ERROR(hipHostMalloc((void **)&h_vec, vecbytes));
srand((long)time(NULL));
for (int i = 0; i < veclen; i++) {
h_vec[i]=rand()/(double)RAND_MAX;
}
//************ 6) Convert the sparse matrix from COO to CSR by turning the h_row vector into h_rowoff ************//
int rownum = h_row[matsize - 1] + 1; //gives 5 for a 5 x 5 matrix
int rowoffsize = (rownum) * sizeof(int); //gives 6 for the size of the row_offset vector
int *h_rowoff;
HANDLE_ERROR(hipHostMalloc((void **)&h_rowoff, rowoffsize));
cootocsr(h_rowoff, h_row, matsize);
//************ 7) Create and allocate vectors on the device ************//
double outputsize = veclen * sizeof(double);
double *d_mvalue;
int *d_col;
int *d_rowoff;
double *d_vec;
double *d_output_sm;
HANDLE_ERROR(hipMalloc((void **)&d_mvalue, mdoublesize));
HANDLE_ERROR(hipMalloc((void **)&d_col, mintsize));
HANDLE_ERROR(hipMalloc((void **)&d_rowoff, rowoffsize));
HANDLE_ERROR(hipMalloc((void **)&d_vec, vecbytes));
HANDLE_ERROR(hipMalloc((void **)&d_output_sm, outputsize));
double *h_output_sm;
HANDLE_ERROR(hipHostMalloc((void **)&h_output_sm, outputsize)); //pinned memory
//************ 8) Create streams and events to measure time ************//
hipEvent_t startEvent, stopEvent, dummyEvent;
hipStream_t stream[nStreams];
HANDLE_ERROR( hipEventCreate(&startEvent) );
HANDLE_ERROR( hipEventCreate(&stopEvent) );
HANDLE_ERROR( hipEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams; ++i)
HANDLE_ERROR( hipStreamCreate(&stream[i]) );
const int streamSize = matsize / nStreams;
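// Note: this chunking assumes matsize, rownum and veclen are each divisible by
// nStreams; any remainder elements would be neither copied nor processed.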
//************ 9) Asynchronously copy vectors from host to device ************//
HANDLE_ERROR( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
HANDLE_ERROR( hipMemcpyAsync(&d_mvalue[offset], &h_mvalue[offset], streamSize * sizeof(double), hipMemcpyHostToDevice, stream[i]) );
HANDLE_ERROR( hipMemcpyAsync(&d_col[offset], &h_col[offset], streamSize * sizeof(int), hipMemcpyHostToDevice, stream[i]) );
}
const int streamSize_rowoff = (rownum ) / nStreams;
for (int i = 0; i < nStreams; ++i)
{
int offset_rowoff = i * streamSize_rowoff;
HANDLE_ERROR( hipMemcpyAsync(&d_rowoff[offset_rowoff], &h_rowoff[offset_rowoff], streamSize_rowoff * sizeof(int), hipMemcpyHostToDevice, stream[i]) );
}
const int streamSize_vec = veclen / nStreams;
for (int i = 0; i < nStreams; ++i)
{
int offset_vec = i * streamSize_vec;
HANDLE_ERROR( hipMemcpyAsync(&d_vec[offset_vec], &h_vec[offset_vec], streamSize_vec * sizeof(double), hipMemcpyHostToDevice, stream[i]) );
}
//************ 10) Asynchronously launch the kernel code using shared memory ************//
for (int i = 0; i < nStreams; ++i)
{
int offset_vec = i * streamSize_vec;
hipLaunchKernelGGL(( spmvAsync), dim3((streamSize_vec/BLOCK_SIZE)),dim3(BLOCK_SIZE),0,stream[i], d_output_sm, d_mvalue, d_rowoff, d_col, rownum, d_vec,offset_vec);
}
//************ 11) Asynchronously copy the result vector from device to host ************//
for (int i = 0; i < nStreams; ++i)
{
int offset_vec = i * streamSize_vec;
HANDLE_ERROR( hipMemcpyAsync(&h_output_sm[offset_vec], &d_output_sm[offset_vec], streamSize_vec * sizeof(double), hipMemcpyDeviceToHost, stream[i]));
}
//************ 12) Compute time and GFLOPS metrics ************//
float ms;
HANDLE_ERROR( hipEventRecord(stopEvent, 0) );
HANDLE_ERROR( hipEventSynchronize(stopEvent) );
HANDLE_ERROR( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Transfer plus execution time (seconds): %f\n", ms/1e3);
double Flops_sm=(ITERATIONS)/(double)(ms/1e3);
double gFlops_sm=(double)Flops_sm/1e9;// Calculate GigaFLOPS: FLOPS * 10^(-9).
printf("GFLOPS with async : %f\n",gFlops_sm);
//bandwidth computation
double cant_bytes=0;
cant_bytes+=matsize*8; //NNZ values vector
cant_bytes+=matsize*4; //column indices vector
cant_bytes+=(rownum+1)*4; //row offsets vector (rowoff)
cant_bytes+=veclen*8; //vector being multiplied
cant_bytes+=veclen*8; //result vector
printf("Effective Bandwidth (GB/s): %f \n", (cant_bytes/1e9)/(ms/1e3));
//************ 13) Run and time a serial SpMV process ************//
double *temp = (double *)malloc(outputsize);
clock_t ts;
ts = clock();
for (int i = 0; i < rownum; i++) {
temp[i] = 0;
for (int j = h_rowoff[i]; j < h_rowoff[i + 1]; j++) {
temp[i] += h_mvalue[j] * h_vec[h_col[j]];
}
}
ts = clock() - ts;
double time_serial = (((double)ts)/CLOCKS_PER_SEC); // in seconds
printf("El tiempo serial tomo %f segundos \n", time_serial);
printf("El SpeedUp es %f segundos \n", ms/time_serial);
system("PAUSE()");
for (int i = 0; i < veclen; i++) {
printf("Matriz resultante, elemento: %d = %lf\n", i, temp[i]);
}
system("PAUSE()");
for (int i = 0; i < veclen; i++) {
printf("Matriz resultante, elemento: %d = %lf\n", i, h_output_sm[i]);
}
//************ 14) Free pinned and global memory vectors ************//
HANDLE_ERROR(hipHostFree(h_row));
HANDLE_ERROR(hipHostFree(h_rowoff));
HANDLE_ERROR(hipHostFree(h_col));
HANDLE_ERROR(hipHostFree(h_mvalue));
HANDLE_ERROR(hipHostFree(h_vec));
free(temp);
HANDLE_ERROR(hipHostFree(h_output_sm));
//Deallocate GPU memory:
HANDLE_ERROR(hipFree(d_mvalue));
HANDLE_ERROR(hipFree(d_col));
HANDLE_ERROR(hipFree(d_rowoff));
HANDLE_ERROR(hipFree(d_vec));
HANDLE_ERROR(hipFree(d_output_sm));
system("PAUSE()");
return 0;
} | d1ec045b61ab86dfcc9251795d982d6bdb6a57ef.cu | #define _CRT_SECURE_NO_DEPRECATE
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define ITERATIONS 10000000
#define BLOCK_SIZE 32
// error handling for the CUDA API functions
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err), file, line);
system("pause");
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__
void spmvAsync(double *y,
const double *A,
const int *IA,
const int *JA,
const int M,
const double *x,
const int offset)
{
__shared__ float t_sum[BLOCK_SIZE]; // thread sum
int j;
int t_id = offset + threadIdx.x + blockDim.x * blockIdx.x; // thread id
int row = t_id / 32; // one warp per row
int t_warp = t_id & 31; // thread number within a given warp
// boundary condition
if (row < M){
// compute running sum per thread in warp
t_sum[threadIdx.x] = 0;
for (j = IA[row] + t_warp; j < IA[row+1]; j += 32)
t_sum[threadIdx.x] += A[j] * x[JA[j]];
// Parallel reduction of result in shared memory for one warp
if (t_warp < 16) t_sum[threadIdx.x] += t_sum[threadIdx.x+16];
if (t_warp < 8) t_sum[threadIdx.x] += t_sum[threadIdx.x+8];
if (t_warp < 4) t_sum[threadIdx.x] += t_sum[threadIdx.x+4];
if (t_warp < 2) t_sum[threadIdx.x] += t_sum[threadIdx.x+2];
if (t_warp < 1) t_sum[threadIdx.x] += t_sum[threadIdx.x+1];
// first thread within warp contains desired y[row] result so write it to y
if (t_warp == 0)
y[row] = t_sum[threadIdx.x];
}
}
void cootocsr(int *rowoff, int *row, int size) {
rowoff[0] = 0;
int prev = 0, accu = 1, j = 1;
for (int i = 1; i < size; i++) {
if (row[i] - row[prev] > 1) {
for (int k = 0; k < row[i] - row[prev]; k++) {
rowoff[j++] = accu;
}
prev = i;
}
else
if (row[prev] != row[i]) {
rowoff[j++] = accu;
prev = i;
}
accu += 1;
}
rowoff[j] = accu;
}
int main() {
const int nStreams = 4;
//************ 1) Read the dataset files ************//
//FILE *pToMFile = fopen("mat5_5.txt", "r"); //5 5 13
//FILE *pToMFile = fopen("mat20_20.txt", "r"); //20 20 34
//FILE *pToMFile = fopen("cop20k_A.mtx", "r"); //121192 121192 1362087
//FILE *pToMFile = fopen("cant.mtx", "r"); //62451 62451 2034917
//FILE *pToMFile = fopen("consph.mtx", "r"); //83334 83334 3046907
//FILE *pToMFile = fopen("mac_econ_fwd500.mtx", "r"); //206500 206500 1273389
//FILE *pToMFile = fopen("mc2depi.mtx", "r"); //525825 525825 2100225
//FILE *pToMFile = fopen("pdb1HYS.mtx", "r"); //36417 36417 2190591
//FILE *pToMFile = fopen("pwtk.mtx", "r"); //217918 217918 5926171
//FILE *pToMFile = fopen("scircuit.mtx", "r"); //170998 170998 958936
//FILE *pToMFile = fopen("shipsec1.mtx", "r"); //140874 140874 3977139
FILE *pToMFile = fopen("webbase-1M.mtx", "r"); //1000005 1000005 3105536
//************ 2) Extract the vector/square-matrix size and the number of non-zero elements (NNZ) ************//
int matsize, veclen,temp1;
fscanf(pToMFile, "%d", &veclen); //vector size
fscanf(pToMFile, "%d", &temp1); //skip
fscanf(pToMFile, "%d", &matsize); //NNZ count
//************ 3) Create host vectors in pinned memory to capture the dataset file contents ************//
int mintsize = matsize * sizeof(int);
double mdoublesize = matsize * sizeof(double);
int *h_row;
HANDLE_ERROR(cudaMallocHost((void **)&h_row, mintsize));
double *h_mvalue;
HANDLE_ERROR(cudaMallocHost((void **)&h_mvalue, mdoublesize));
//int *h_col = (int *)malloc(mintsize);
int *h_col;
HANDLE_ERROR(cudaMallocHost((void **)&h_col, mintsize));
//************ 4) Read the sparse matrix elements ************//
for (int i = 0; i < matsize; i++)
{
fscanf(pToMFile, "%d", &h_col[i]);
fscanf(pToMFile, "%d", &h_row[i]);
fscanf(pToMFile, "%lf", &h_mvalue[i]);
}
fclose(pToMFile);
//************ 5) Create and populate the vector in pinned memory ************//
int vecbytes = veclen * sizeof(double);
double *h_vec;
HANDLE_ERROR(cudaMallocHost((void **)&h_vec, vecbytes));
srand((long)time(NULL));
for (int i = 0; i < veclen; i++) {
h_vec[i]=rand()/(double)RAND_MAX;
}
//************ 6) Convert the sparse matrix from COO to CSR by turning the h_row vector into h_rowoff ************//
int rownum = h_row[matsize - 1] + 1; //gives 5 for a 5 x 5 matrix
int rowoffsize = (rownum) * sizeof(int); //gives 6 for the size of the row_offset vector
int *h_rowoff;
HANDLE_ERROR(cudaMallocHost((void **)&h_rowoff, rowoffsize));
cootocsr(h_rowoff, h_row, matsize);
//************ 7) Create and allocate vectors on the device ************//
double outputsize = veclen * sizeof(double);
double *d_mvalue;
int *d_col;
int *d_rowoff;
double *d_vec;
double *d_output_sm;
HANDLE_ERROR(cudaMalloc((void **)&d_mvalue, mdoublesize));
HANDLE_ERROR(cudaMalloc((void **)&d_col, mintsize));
HANDLE_ERROR(cudaMalloc((void **)&d_rowoff, rowoffsize));
HANDLE_ERROR(cudaMalloc((void **)&d_vec, vecbytes));
HANDLE_ERROR(cudaMalloc((void **)&d_output_sm, outputsize));
double *h_output_sm;
HANDLE_ERROR(cudaMallocHost((void **)&h_output_sm, outputsize)); //pinned memory
//************ 8) Create streams and events to measure time ************//
cudaEvent_t startEvent, stopEvent, dummyEvent;
cudaStream_t stream[nStreams];
HANDLE_ERROR( cudaEventCreate(&startEvent) );
HANDLE_ERROR( cudaEventCreate(&stopEvent) );
HANDLE_ERROR( cudaEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams; ++i)
HANDLE_ERROR( cudaStreamCreate(&stream[i]) );
const int streamSize = matsize / nStreams;
//************ 9) Asynchronously copy vectors from host to device ************//
HANDLE_ERROR( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
HANDLE_ERROR( cudaMemcpyAsync(&d_mvalue[offset], &h_mvalue[offset], streamSize * sizeof(double), cudaMemcpyHostToDevice, stream[i]) );
HANDLE_ERROR( cudaMemcpyAsync(&d_col[offset], &h_col[offset], streamSize * sizeof(int), cudaMemcpyHostToDevice, stream[i]) );
}
const int streamSize_rowoff = (rownum ) / nStreams;
for (int i = 0; i < nStreams; ++i)
{
int offset_rowoff = i * streamSize_rowoff;
HANDLE_ERROR( cudaMemcpyAsync(&d_rowoff[offset_rowoff], &h_rowoff[offset_rowoff], streamSize_rowoff * sizeof(int), cudaMemcpyHostToDevice, stream[i]) );
}
const int streamSize_vec = veclen / nStreams;
for (int i = 0; i < nStreams; ++i)
{
int offset_vec = i * streamSize_vec;
HANDLE_ERROR( cudaMemcpyAsync(&d_vec[offset_vec], &h_vec[offset_vec], streamSize_vec * sizeof(double), cudaMemcpyHostToDevice, stream[i]) );
}
//************ 10) Asynchronously launch the kernel code using shared memory ************//
for (int i = 0; i < nStreams; ++i)
{
int offset_vec = i * streamSize_vec;
spmvAsync<<<(streamSize_vec/BLOCK_SIZE),BLOCK_SIZE,0,stream[i]>>>(d_output_sm, d_mvalue, d_rowoff, d_col, rownum, d_vec,offset_vec);
}
//************ 11) Asynchronously copy the result vector from device to host ************//
for (int i = 0; i < nStreams; ++i)
{
int offset_vec = i * streamSize_vec;
HANDLE_ERROR( cudaMemcpyAsync(&h_output_sm[offset_vec], &d_output_sm[offset_vec], streamSize_vec * sizeof(double), cudaMemcpyDeviceToHost, stream[i]));
}
//************ 12) Compute time and GFLOPS metrics ************//
float ms;
HANDLE_ERROR( cudaEventRecord(stopEvent, 0) );
HANDLE_ERROR( cudaEventSynchronize(stopEvent) );
HANDLE_ERROR( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Transfer plus execution time (seconds): %f\n", ms/1e3);
double Flops_sm=(ITERATIONS)/(double)(ms/1e3);
double gFlops_sm=(double)Flops_sm/1e9;// Calculate GigaFLOPS: FLOPS * 10^(-9).
printf("GFLOPS with async : %f\n",gFlops_sm);
//bandwidth computation
double cant_bytes=0;
cant_bytes+=matsize*8; //NNZ values vector
cant_bytes+=matsize*4; //column indices vector
cant_bytes+=(rownum+1)*4; //row offsets vector (rowoff)
cant_bytes+=veclen*8; //vector being multiplied
cant_bytes+=veclen*8; //result vector
printf("Effective Bandwidth (GB/s): %f \n", (cant_bytes/1e9)/(ms/1e3));
//************ 13) Run and time a serial SpMV process ************//
double *temp = (double *)malloc(outputsize);
clock_t ts;
ts = clock();
for (int i = 0; i < rownum; i++) {
temp[i] = 0;
for (int j = h_rowoff[i]; j < h_rowoff[i + 1]; j++) {
temp[i] += h_mvalue[j] * h_vec[h_col[j]];
}
}
ts = clock() - ts;
double time_serial = (((double)ts)/CLOCKS_PER_SEC); // in seconds
printf("El tiempo serial tomo %f segundos \n", time_serial);
printf("El SpeedUp es %f segundos \n", ms/time_serial);
system("PAUSE()");
for (int i = 0; i < veclen; i++) {
printf("Matriz resultante, elemento: %d = %lf\n", i, temp[i]);
}
system("PAUSE()");
for (int i = 0; i < veclen; i++) {
printf("Matriz resultante, elemento: %d = %lf\n", i, h_output_sm[i]);
}
//************ 14) Free pinned and global memory vectors ************//
HANDLE_ERROR(cudaFreeHost(h_row));
HANDLE_ERROR(cudaFreeHost(h_rowoff));
HANDLE_ERROR(cudaFreeHost(h_col));
HANDLE_ERROR(cudaFreeHost(h_mvalue));
HANDLE_ERROR(cudaFreeHost(h_vec));
free(temp);
HANDLE_ERROR(cudaFreeHost(h_output_sm));
//Deallocate GPU memory:
HANDLE_ERROR(cudaFree(d_mvalue));
HANDLE_ERROR(cudaFree(d_col));
HANDLE_ERROR(cudaFree(d_rowoff));
HANDLE_ERROR(cudaFree(d_vec));
HANDLE_ERROR(cudaFree(d_output_sm));
system("PAUSE()");
return 0;
} |
6c7d6724c115ef5a69bc6b8f1b0acc110a27d8d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <random>
#include <iostream>
#include <hip/hip_runtime.h>
#include "../cuda_common.hpp"
struct relu_grad
{
__device__ float operator()(float x) { return x > 0; }
};
struct mish_grad_dn
{
__device__ float softplus_kernel(float x, float threshold = 20)
{
if (x > threshold) return x;
else if (x < -threshold) return expf(x);
return log1pf(expf(x));
}
__device__ float operator()(float x)
{
const float MISH_THRESHOLD = 20.0f;
const float sp = softplus_kernel(x, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return grad;
}
};
// taken from https://github.com/thomasbrandon/mish-cuda
struct mish_grad_tb
{
__device__ float operator()(float x)
{
const float THRESHOLD = 20.0f;
const float sp = x < THRESHOLD ? log1p(expf(x)) : x;
const float grad_sp = 1 - exp(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return grad;
}
};
struct mish_grad_tb_expm1
{
__device__ float operator()(float x)
{
const float THRESHOLD = 20.0f;
const float sp = x < THRESHOLD ? log1p(expf(x)) : x;
const float grad_sp = -expm1(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return grad;
}
};
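// mish_grad_fast: same gradient as above but with fast-math intrinsics. With
// e = exp(x) and n = e*e + 2*e, softplus is sp = log(1 + e) and
// tanh(sp) = ((1+e)^2 - 1) / ((1+e)^2 + 1) = n / (n + 2), while
// grad_sp = -expm1(-sp) simplifies to sigmoid(x) = e / (e + 1). The branch at
// x <= -0.6f selects between two algebraically equivalent forms of n / (n + 2),
// and the final x > 10.5f guard returns 1, where the exact gradient has saturated.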
struct mish_grad_fast
{
__device__ float operator()(float x)
{
auto e = __expf(x);
auto n = e * e + 2 * e;
float tsp;
if (x <= -0.6f)
tsp = __fdividef(n, n + 2);
else
tsp = 1 - __fdividef(2, n + 2);
const float grad_sp = __fdividef(e, e + 1);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return x > 10.5f ? 1 : grad;
}
};
struct mish_grad_double
{
__device__ float operator()(float x)
{
const double sp = log1p(exp(x));
const double grad_sp = -expm1(-sp);
const double tsp = tanh(sp);
const double grad_tsp = (1 - tsp*tsp) * grad_sp;
const double grad = x * grad_tsp + tsp;
return grad;
}
};
template <class GradientFunc>
__global__ void grad_vec1(float* /* __restrict__ */ dz, const float* /* __restrict__ */ input, int n)
{
GradientFunc grad;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
dz[i] *= grad(input[i]);
}
}
template <class GradientFunc>
__global__ void grad_vec4(float4* /* __restrict__ */ dz, const float4* /* __restrict__ */ input, int n)
{
GradientFunc grad;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
float4 temp = input[i];
float4 dy = dz[i];
dy.w *= grad(temp.w);
dy.x *= grad(temp.x);
dy.y *= grad(temp.y);
dy.z *= grad(temp.z);
dz[i] = dy;
}
}
__global__ void limit_2L1S_v1(float* /* __restrict__ */ dz, const float* /* __restrict__ */ input, int n)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
dz[i] += input[i];
}
__global__ void limit_2L1S_v4(float4* /* __restrict__ */ dz, const float4* /* __restrict__ */ input, int n)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
auto dy = dz[i];
auto inp = input[i];
dy.w += inp.w;
dy.x += inp.x;
dy.y += inp.y;
dy.z += inp.z;
dz[i] = dy;
}
}
// dump values for plot.py to visualize errors
template <class GradientFunc>
__global__ void dump()
{
GradientFunc grad;
for (float x = -100; x < 20; x += 0.0001)
printf("%.7f %.7e\n", x, grad(x));
}
int main()
{
// dump<mish_grad_tb><<<1, 1>>>();
// hipDeviceSynchronize();
// return 0;
constexpr int NUM_ELEMENTS = 1024 * 1024 * 16;
float *input_d;
CHECK_CUDA(hipMalloc(&input_d, NUM_ELEMENTS * sizeof(float)));
float *grad_d;
CHECK_CUDA(hipMalloc(&grad_d, NUM_ELEMENTS * sizeof(float)));
float *input_h = new float[NUM_ELEMENTS];
float *grad_h = new float[NUM_ELEMENTS];
float *output_h = new float[NUM_ELEMENTS];
float *output_ref = new float[NUM_ELEMENTS];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> in_dis(-50, 20);
for (int i = 0; i < NUM_ELEMENTS; i++)
{
long double a = in_dis(gen);
input_h[i] = a;
long double dy = 1.0;
grad_h[i] = dy;
const long double sp = std::log1p(::exp(a));
const long double grad_sp = -std::expm1(-sp);
const long double tsp = std::tanh(sp);
const long double grad_tsp = (1 - tsp * tsp) * grad_sp;
const long double grad = a * grad_tsp + tsp;
output_ref[i] = dy * grad;
}
auto lInorm = [&] (float* x, float* y, int n) {
float max = 0;
for (int i = 0; i < n; i++)
max = ::max(max, std::abs(y[i] - x[i]));
return max;
};
auto l2norm = [] (float* x, float* y, int n) {
std::vector<double> diff(n);
for (int i = 0; i < n; i++)
diff[i] = y[i] - x[i];
auto sqr_sum = std::accumulate(std::begin(diff), std::end(diff), 0.0, [](auto lhs, auto rhs) { return lhs + rhs * rhs; });
return std::sqrt(sqr_sum);
};
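// Each variant below is checked against the long-double host reference using an
// L2 norm and a max-abs (Linf) norm of the error; the relu_grad and limit_2L1S
// launches are not checked, they only provide speed baselines for comparison.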
constexpr int NUM_BLOCKS = 100, BLOCK_SIZE = 1024;
// relu grad for reference
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec1<relu_grad>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
// to establish approximate bounds on performance achievable based on memory accesses
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( limit_2L1S_v1), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec1<mish_grad_dn>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_dn: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec1<mish_grad_tb>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_tb: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec1<mish_grad_tb_expm1>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_tb_expm1: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec1<mish_grad_fast>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_fast: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec1<mish_grad_double>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_double: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
static_assert(NUM_ELEMENTS % 4 == 0, "");
auto grad_d4 = reinterpret_cast<float4*>(grad_d);
auto input_d4 = reinterpret_cast<float4*>(input_d);
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( limit_2L1S_v4), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec4<relu_grad>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec4<mish_grad_dn>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_dn: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec4<mish_grad_tb>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_tb: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec4<mish_grad_tb_expm1>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_tb_expm1: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec4<mish_grad_fast>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_fast: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(hipMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( grad_vec4<mish_grad_double>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_double: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
return 0;
} | 6c7d6724c115ef5a69bc6b8f1b0acc110a27d8d3.cu | #include <random>
#include <iostream>
#include <cuda_runtime.h>
#include "../cuda_common.hpp"
struct relu_grad
{
__device__ float operator()(float x) { return x > 0; }
};
struct mish_grad_dn
{
__device__ float softplus_kernel(float x, float threshold = 20)
{
if (x > threshold) return x;
else if (x < -threshold) return expf(x);
return log1pf(expf(x));
}
__device__ float operator()(float x)
{
const float MISH_THRESHOLD = 20.0f;
const float sp = softplus_kernel(x, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return grad;
}
};
// taken from https://github.com/thomasbrandon/mish-cuda
struct mish_grad_tb
{
__device__ float operator()(float x)
{
const float THRESHOLD = 20.0f;
const float sp = x < THRESHOLD ? log1p(expf(x)) : x;
const float grad_sp = 1 - exp(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return grad;
}
};
struct mish_grad_tb_expm1
{
__device__ float operator()(float x)
{
const float THRESHOLD = 20.0f;
const float sp = x < THRESHOLD ? log1p(expf(x)) : x;
const float grad_sp = -expm1(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return grad;
}
};
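// Fast-math variant: uses the __expf/__fdividef intrinsics and clamps the gradient to 1 for x > 10.5f.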
struct mish_grad_fast
{
__device__ float operator()(float x)
{
auto e = __expf(x);
auto n = e * e + 2 * e;
float tsp;
if (x <= -0.6f)
tsp = __fdividef(n, n + 2);
else
tsp = 1 - __fdividef(2, n + 2);
const float grad_sp = __fdividef(e, e + 1);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = x * grad_tsp + tsp;
return x > 10.5f ? 1 : grad;
}
};
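// Computes the gradient in double precision; serves as a higher-accuracy GPU baseline.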
struct mish_grad_double
{
__device__ float operator()(float x)
{
const double sp = log1p(exp(x));
const double grad_sp = -expm1(-sp);
const double tsp = tanh(sp);
const double grad_tsp = (1 - tsp*tsp) * grad_sp;
const double grad = x * grad_tsp + tsp;
return grad;
}
};
template <class GradientFunc>
__global__ void grad_vec1(float* /* __restrict__ */ dz, const float* /* __restrict__ */ input, int n)
{
GradientFunc grad;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
dz[i] *= grad(input[i]);
}
}
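// Same computation as grad_vec1, but each thread processes a float4 (four elements) per iteration.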
template <class GradientFunc>
__global__ void grad_vec4(float4* /* __restrict__ */ dz, const float4* /* __restrict__ */ input, int n)
{
GradientFunc grad;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
float4 temp = input[i];
float4 dy = dz[i];
dy.w *= grad(temp.w);
dy.x *= grad(temp.x);
dy.y *= grad(temp.y);
dy.z *= grad(temp.z);
dz[i] = dy;
}
}
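// '2L1S' kernels: two loads and one store per element with trivial arithmetic,
// used to estimate the memory-bandwidth bound for the gradient kernels.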
__global__ void limit_2L1S_v1(float* /* __restrict__ */ dz, const float* /* __restrict__ */ input, int n)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
dz[i] += input[i];
}
__global__ void limit_2L1S_v4(float4* /* __restrict__ */ dz, const float4* /* __restrict__ */ input, int n)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
auto dy = dz[i];
auto inp = input[i];
dy.w += inp.w;
dy.x += inp.x;
dy.y += inp.y;
dy.z += inp.z;
dz[i] = dy;
}
}
// dump values for plot.py to visualize errors
template <class GradientFunc>
__global__ void dump()
{
GradientFunc grad;
for (float x = -100; x < 20; x += 0.0001)
printf("%.7f %.7e\n", x, grad(x));
}
int main()
{
// dump<mish_grad_tb><<<1, 1>>>();
// cudaDeviceSynchronize();
// return 0;
constexpr int NUM_ELEMENTS = 1024 * 1024 * 16;
float *input_d;
CHECK_CUDA(cudaMalloc(&input_d, NUM_ELEMENTS * sizeof(float)));
float *grad_d;
CHECK_CUDA(cudaMalloc(&grad_d, NUM_ELEMENTS * sizeof(float)));
float *input_h = new float[NUM_ELEMENTS];
float *grad_h = new float[NUM_ELEMENTS];
float *output_h = new float[NUM_ELEMENTS];
float *output_ref = new float[NUM_ELEMENTS];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> in_dis(-50, 20);
for (int i = 0; i < NUM_ELEMENTS; i++)
{
long double a = in_dis(gen);
input_h[i] = a;
long double dy = 1.0;
grad_h[i] = dy;
const long double sp = std::log1p(std::exp(a));
const long double grad_sp = -std::expm1(-sp);
const long double tsp = std::tanh(sp);
const long double grad_tsp = (1 - tsp * tsp) * grad_sp;
const long double grad = a * grad_tsp + tsp;
output_ref[i] = dy * grad;
}
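// output_ref now holds the mish gradient evaluated in long double as the accuracy reference.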
auto lInorm = [&] (float* x, float* y, int n) {
float max = 0;
for (int i = 0; i < n; i++)
max = std::max(max, std::abs(y[i] - x[i]));
return max;
};
auto l2norm = [] (float* x, float* y, int n) {
std::vector<double> diff(n);
for (int i = 0; i < n; i++)
diff[i] = y[i] - x[i];
auto sqr_sum = std::accumulate(std::begin(diff), std::end(diff), 0.0, [](auto lhs, auto rhs) { return lhs + rhs * rhs; });
return std::sqrt(sqr_sum);
};
constexpr int NUM_BLOCKS = 100, BLOCK_SIZE = 1024;
// relu grad for reference
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec1<relu_grad><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
// to establish approximate bounds on performance achievable based on memory accesses
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
limit_2L1S_v1<<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec1<mish_grad_dn><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_dn: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec1<mish_grad_tb><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_tb: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec1<mish_grad_tb_expm1><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_tb_expm1: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec1<mish_grad_fast><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_fast: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec1<mish_grad_double><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d, input_d, NUM_ELEMENTS);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec1] mish_grad_double: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
static_assert(NUM_ELEMENTS % 4 == 0, "");
auto grad_d4 = reinterpret_cast<float4*>(grad_d);
auto input_d4 = reinterpret_cast<float4*>(input_d);
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
limit_2L1S_v4<<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec4<relu_grad><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec4<mish_grad_dn><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_dn: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec4<mish_grad_tb><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_tb: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec4<mish_grad_tb_expm1><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_tb_expm1: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec4<mish_grad_fast><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_fast: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
CHECK_CUDA(cudaMemcpy(input_d, input_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(grad_d, grad_h, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice));
grad_vec4<mish_grad_double><<<NUM_BLOCKS, BLOCK_SIZE>>>(grad_d4, input_d4, NUM_ELEMENTS / 4);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaMemcpy(output_h, grad_d, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "[vec4] mish_grad_double: " << l2norm(output_ref, output_h, NUM_ELEMENTS) << ' ' << lInorm(output_ref, output_h, NUM_ELEMENTS) << '\n';
return 0;
} |
ca910d7c813155e7dcbc4608cabbe1f159136978.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/atomic/common.cuh"
#include "reduction_functions.h"
#include "reduction_utils.cuh"
#define NUM_ELEMENTS_PER_THREAD 4
#define NUM_THREADS_PER_WARP 32
#define NUM_WARPS_PER_BLOCK 8
#define MAX_NUM_BLOCKS 256
#define ALL_ONE_MASK 0xFFFFFFFF
#define ONE_MASK 0x00000001
namespace onnxruntime {
namespace cuda {
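// Full-tensor reductions (sum, squared sum, L2 norm, mean) built from a
// thread -> warp -> block -> grid hierarchy, plus a matrix row-reduction
// used when all but the last axis are reduced.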
std::pair<int, int> compute_block_size(int size) {
int x = NUM_THREADS_PER_WARP;
int y = ::min(NUM_WARPS_PER_BLOCK, ::max(1, size / (NUM_ELEMENTS_PER_THREAD * NUM_THREADS_PER_WARP)));
return std::make_pair(x, y);
}
int compute_grid_size(int size) {
const auto block = compute_block_size(size);
return ::min(MAX_NUM_BLOCKS, ::max(1, size / (NUM_ELEMENTS_PER_THREAD * block.first * block.second)));
}
int compute_reduction_buffer_size(int element_size, int size) {
const int num_blocks = compute_grid_size(size);
return static_cast<int>(num_blocks * element_size + sizeof(int));
}
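// Layout of the scratch 'buffer': num_blocks partial sums (TOut) followed by one int
// used as a completion counter for the grid-level (last-block) pass.
// Host-side usage sketch (illustrative names):
//   void* scratch; hipMalloc(&scratch, compute_reduction_buffer_size(sizeof(float), n));
//   reduce_sum(d_in, d_out, n, static_cast<float*>(scratch));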
template<typename TIn, typename TOut, typename TOp, typename TFinalOp, bool DivideResultBySize>
__global__ void reduce_all_kernel(const int size, const TIn * data, TOut* output, TOut* buffer) {
extern __shared__ unsigned char shared_memory_[];
TOut* shared_memory = reinterpret_cast<TOut*>(shared_memory_);
// Thread-level indexes:
// Linear index of thread in block.
const int tid_in_block = threadIdx.y * blockDim.x + threadIdx.x;
// Total number of threads in a 2-D block.
const int num_threads_in_block = blockDim.x * blockDim.y;
// Warp-level indexes:
// Warp index of thread.
const int wid_in_block = tid_in_block / NUM_THREADS_PER_WARP;
// Lane index of thread.
const int lid_in_block = tid_in_block % NUM_THREADS_PER_WARP;
// Warp count per block.
const int num_warps_in_block = num_threads_in_block / NUM_THREADS_PER_WARP;
// Grid-level indexes:
// Linear index of block in grid.
const int bid_in_grid = blockIdx.x + blockIdx.y * gridDim.x;
// Linear index of thread in grid.
const int tid_in_grid = bid_in_grid * (blockDim.x * blockDim.y) + tid_in_block;
// Total number of blocks in a 2-D grid.
const int num_blocks_in_grid = gridDim.x * gridDim.y;
// Total number of threads in a 2-D grid with 2-D blocks.
const int num_threads_in_grid = num_blocks_in_grid * num_threads_in_block;
// Thread-level reduction (storage change: global memory -> register).
// One thread reduces NUM_ELEMENTS_PER_THREAD elements to a thread register
// in one iteration.
TOut value = 0;
for (int id = tid_in_grid; id < size; id += NUM_ELEMENTS_PER_THREAD * num_threads_in_grid) {
TOut v[NUM_ELEMENTS_PER_THREAD];
#pragma unroll
for (int i = 0; i < NUM_ELEMENTS_PER_THREAD; i++) {
int offset = id + i * num_threads_in_grid;
if (offset < size) {
v[i] = TOut(TOp()(data[offset]));
} else {
v[i] = TOut(0.0f);
}
}
#pragma unroll
for (int i = 0; i < NUM_ELEMENTS_PER_THREAD; i++) {
value += v[i];
}
}
#if __CUDA_ARCH__ >= 700
__syncwarp();
#else
__syncthreads();
#endif
// Warp-level reduction (storage change: register -> register).
// The values in a warp will be summed up to a scalar. After warp-level
// reduction, each block holds num_warps_in_block values in the shared memory.
TOut value_ = value;
#pragma unroll
for (int stride = NUM_THREADS_PER_WARP / 2; stride > 0; stride /= 2) {
value_ += WARP_SHFL_DOWN(value_, stride);
}
// Return early if only one warp is used for reduction.
// Given a fixed number of threads, we prefer threads over warps over blocks so that we never have cases such as
// 1. two blocks and each of them has only 1 warp (32 threads).
// 2. two warps and each of them has only 2 threads.
if (num_warps_in_block == 1) {
if (tid_in_grid == 0) {
// The compile-time if-else branch controlled by the template argument can be
// optimized out, so there is no branch in the real computation phase.
if (DivideResultBySize) {
output[0] = TFinalOp()(value_ / TOut(size));
} else {
output[0] = TFinalOp()(value_);
}
}
return;
}
if (lid_in_block == 0) {
shared_memory[wid_in_block] = value_;
}
__syncthreads();
// Block-level reduction (storage change: shared memory -> global memory).
// The values in a block will be summed up to a scalar.
// Note that the values are stored in the shared memory.
// The stride loop below starts at NUM_WARPS_PER_BLOCK / 2 and keeps halving;
// this is safe because num_warps_in_block never exceeds NUM_WARPS_PER_BLOCK
// and the bound check inside the loop skips out-of-range additions. The
// shared memory holds exactly one partial sum per warp.
#pragma unroll
for (int stride = NUM_WARPS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (tid_in_block + stride < num_warps_in_block) {
shared_memory[tid_in_block] += shared_memory[tid_in_block + stride];
}
__syncthreads();
}
// Return early if only one block is used for reduction.
if (num_blocks_in_grid == 1) {
if (tid_in_grid == 0) {
// The compile-time if-else branch controlled by the template argument can be
// optimized out, so there is no branch in the real computation phase.
if (DivideResultBySize) {
output[0] = TFinalOp()(shared_memory[0] / TOut(size));
} else {
output[0] = TFinalOp()(shared_memory[0]);
}
}
return;
}
if (tid_in_block == 0) {
buffer[bid_in_grid] = shared_memory[0];
}
__threadfence();
__syncthreads();
// Grid-level reduction. We use the last block to sum up values
// stored in the global buffer.
__shared__ bool is_last_block_done;
if (tid_in_block == 0) {
int* p_lock = reinterpret_cast<int*>(buffer + num_blocks_in_grid);
int count = atomicAdd(p_lock, 1);
is_last_block_done = (count == (num_blocks_in_grid - 1));
}
// All threads in each block see if they belong to the last active block
// (i.e., the value of is_last_block_done).
__syncthreads();
// Only the block that saw count equal to num_blocks_in_grid - 1
// enters the following branch.
if (is_last_block_done) {
const int pow2_bound = least_pow2_bound(num_blocks_in_grid);
for (int stride = pow2_bound / 2; stride > 0; stride /= 2) {
if (tid_in_block < stride && tid_in_block + stride < num_blocks_in_grid) {
buffer[tid_in_block] += buffer[tid_in_block + stride];
}
__syncthreads();
}
// The first thread in the last block assigns the final output.
if (tid_in_block == 0) {
// The compile-time if-else branch controlled by the template argument can be
// optimized out, so there is no branch in the real computation phase.
if (DivideResultBySize) {
output[0] = TFinalOp()(buffer[0] / TOut(size));
} else {
output[0] = TFinalOp()(buffer[0]);
}
}
}
}
template<typename TIn, typename TOut, typename TOp, typename TFinalOp, bool DivideResultBySize>
void call_reduce_all_kernel(const TIn *data, TOut *output, int size, TOut *buffer)
{
const auto block_size = compute_block_size(size);
const int num_blocks = compute_grid_size(size);
const dim3 block(block_size.first, block_size.second, 1);
const dim3 grid(num_blocks, 1, 1);
// If more than one block is used, then inter-block reduction is needed.
if (num_blocks != 1) {
hipMemset(buffer + num_blocks, 0, sizeof(int));
}
const int shared_mem_size = sizeof(TOut) * block_size.first * block_size.second / NUM_THREADS_PER_WARP;
hipLaunchKernelGGL(( reduce_all_kernel<TIn, TOut, TOp, TFinalOp, DivideResultBySize>), dim3(grid), dim3(block), shared_mem_size, 0, size, data, output, buffer);
}
template<typename TIn, typename TOut>
void reduce_sum(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Cast<TOut, TIn>, Identity<TOut>, false>(
data, output, size, buffer);
}
template<typename TIn, typename TOut>
void reduce_square_sum(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Square<TOut, TIn>, Identity<TOut>, false>(
data, output, size, buffer);
}
template<typename TIn, typename TOut>
void reduce_l2_norm(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Square<TOut, TIn>, Sqrt<TOut>, false>(
data, output, size, buffer);
}
template<typename TIn, typename TOut>
void reduce_mean(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Cast<TOut, TIn>, Identity<TOut>, true>(
data, output, size, buffer);
}
template void reduce_sum<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_sum<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_sum<double, double>(
const double* data, double* output, int size, double* buffer);
template void reduce_square_sum<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_square_sum<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_square_sum<double, double>(
const double* data, double* output, int size, double* buffer);
template void reduce_l2_norm<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_l2_norm<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_l2_norm<double, double>(
const double* data, double* output, int size, double* buffer);
template void reduce_mean<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_mean<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_mean<double, double>(
const double* data, double* output, int size, double* buffer);
bool is_matrix_row_reduction(
const cudnnReduceTensorOp_t cudnn_reduce_op,
const int m,
const int n,
const size_t rank,
std::vector<int64_t> axes) {
if (m < 1)
return false;
if (n < 1)
return false;
if (rank < 2)
return false;
if (cudnn_reduce_op != CUDNN_REDUCE_TENSOR_ADD)
return false;
// Check if all but the last axis are reduced. For example, reducing
// [N, C, H, W]-tensor to [W]-tensor can pass these two checks but reducing
// [N, C]-tensor to [N, 1]-tensor cannot.
if (axes.size() != rank - 1)
return false;
// The last reduced axis should be the second last axis. For
// [N, C, H, W]-input, the sorted axes should be [0, 1, 2].
std::sort(axes.begin(), axes.end());
if (axes.back() != rank - 2)
return false;
return true;
}
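// Each block accumulates a tile of rows into a blockDim.y-by-blockDim.x shared buffer,
// reduces it to a single row, and atomically adds that row into the output.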
template<typename TIn, typename TOut, typename TBuf>
__global__ void reduce_matrix_rows_kernel(const TIn *input, TOut *output, int m, int n) {
constexpr int x_load_count_per_thread = 1;
constexpr int y_load_count_per_thread = 4;
const int t_count_x_in_grid = blockDim.x * gridDim.x;
const int t_count_y_in_grid = blockDim.y * gridDim.y;
const int x_grid_stride = t_count_x_in_grid * x_load_count_per_thread;
const int y_grid_stride = t_count_y_in_grid * y_load_count_per_thread;
const int tid_x_in_grid = threadIdx.x + blockDim.x * blockIdx.x;
const int tid_y_in_grid = threadIdx.y + blockDim.y * blockIdx.y;
const int tid_in_block = threadIdx.x + blockDim.x * threadIdx.y;
// Shape is blockDim.y-by-blockDim.x and element type is TBuf.
extern __shared__ unsigned char shared_memory_[];
TBuf *shared_memory = reinterpret_cast<TBuf*>(shared_memory_);
for (int col = tid_x_in_grid; col < n; col += x_grid_stride) {
shared_memory[tid_in_block] = TBuf(0.0f);
// This loop loads multiple blockDim.y-by-blockDim.x sub-tensors from the input.
for (int row = tid_y_in_grid; row < m; row += y_grid_stride) {
TBuf sum = 0.0f;
// Thread-level reduction. Each thread loads y_load_count_per_thread values
// and aggregates them.
#pragma unroll(y_load_count_per_thread)
for (int row_inner = 0; row_inner < y_load_count_per_thread; ++row_inner) {
int row_final = row + row_inner * t_count_y_in_grid;
int col_final = col;
if (row_final < m && col_final < n) {
sum += TBuf(input[row_final * n + col_final]);
}
}
// Write thread-level reduction result into shared memory.
shared_memory[tid_in_block] += sum;
}
// Wait for all threads to finish their thread-level reductions.
__syncthreads();
// This loop conducts reduction on elements stored in shared memory.
// Each block reduces a blockDim.y-by-blockDim.x tile to a 1-by-blockDim.x row.
#pragma unroll(4)
for (int stride = blockDim.y / 2; stride > 0; stride /= 2) {
if (threadIdx.y < stride) {
shared_memory[tid_in_block] += shared_memory[tid_in_block + stride * blockDim.x];
}
__syncthreads();
}
if (threadIdx.y == 0) {
atomic_add(output + col, TOut(shared_memory[threadIdx.x]));
}
// Make sure all values in shared memory have been written into the output memory.
__syncthreads();
}
}
// This function reduces the given input tensor along all but the last axis.
// For example, an [N, C, H, W]-tensor may lead to an output [W]-tensor.
// Its implementation is in reduction_ops.cu and it is called from reduction_ops.cc.
template<typename TIn, typename TOut, typename TBuf>
void call_reduce_matrix_rows(const TIn *input, TOut *output, int m, int n) {
constexpr int max_num_threads_in_block = 512;
constexpr int max_num_blocks_in_grid = 512;
constexpr int load_count_per_thread = 4;
const int block_x_dim = least_pow2_bound(::max(1, ::min(n, GPU_WARP_SIZE)));
const int block_y_dim = least_pow2_bound(::max(1, ::min(max_num_threads_in_block / block_x_dim, m / load_count_per_thread)));
const int grid_x_dim = ::max(1, ::min(n / block_x_dim, max_num_blocks_in_grid));
const int grid_y_dim = ::max(1, ::min(max_num_blocks_in_grid / grid_x_dim, m / block_y_dim / 4));
const dim3 grid(grid_x_dim, grid_y_dim, 1);
const dim3 block(block_x_dim, block_y_dim, 1);
hipLaunchKernelGGL(( reduce_matrix_rows_kernel<TIn, TOut, TBuf>), dim3(grid), dim3(block), block.y * block.x * sizeof(TBuf), 0,
input, output, m, n);
}
template<typename TIn, typename TOut>
void reduce_matrix_rows(const TIn* data, TOut* output, int m, int n)
{
call_reduce_matrix_rows<TIn, TOut, TOut>(data, output, m, n);
}
template<> void reduce_matrix_rows<half, half>(const half* data, half* output, int m, int n)
{
call_reduce_matrix_rows<half, half, float>(data, output, m, n);
}
template void reduce_matrix_rows<float, float>(
const float* data, float* output, int m, int n);
template void reduce_matrix_rows<double, double>(
const double* data, double* output, int m, int n);
} // namespace cuda
} // namespace onnxruntime
| ca910d7c813155e7dcbc4608cabbe1f159136978.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <algorithm>
#include <cuda.h>
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/atomic/common.cuh"
#include "reduction_functions.h"
#include "reduction_utils.cuh"
#define NUM_ELEMENTS_PER_THREAD 4
#define NUM_THREADS_PER_WARP 32
#define NUM_WARPS_PER_BLOCK 8
#define MAX_NUM_BLOCKS 256
#define ALL_ONE_MASK 0xFFFFFFFF
#define ONE_MASK 0x00000001
namespace onnxruntime {
namespace cuda {
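// Full-tensor reductions (sum, squared sum, L2 norm, mean) built from a
// thread -> warp -> block -> grid hierarchy, plus a matrix row-reduction
// used when all but the last axis are reduced.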
std::pair<int, int> compute_block_size(int size) {
int x = NUM_THREADS_PER_WARP;
int y = std::min(NUM_WARPS_PER_BLOCK, std::max(1, size / (NUM_ELEMENTS_PER_THREAD * NUM_THREADS_PER_WARP)));
return std::make_pair(x, y);
}
int compute_grid_size(int size) {
const auto block = compute_block_size(size);
return std::min(MAX_NUM_BLOCKS, std::max(1, size / (NUM_ELEMENTS_PER_THREAD * block.first * block.second)));
}
int compute_reduction_buffer_size(int element_size, int size) {
const int num_blocks = compute_grid_size(size);
return static_cast<int>(num_blocks * element_size + sizeof(int));
}
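// Layout of the scratch 'buffer': num_blocks partial sums (TOut) followed by one int
// used as a completion counter for the grid-level (last-block) pass.
// Host-side usage sketch (illustrative names):
//   void* scratch; cudaMalloc(&scratch, compute_reduction_buffer_size(sizeof(float), n));
//   reduce_sum(d_in, d_out, n, static_cast<float*>(scratch));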
template<typename TIn, typename TOut, typename TOp, typename TFinalOp, bool DivideResultBySize>
__global__ void reduce_all_kernel(const int size, const TIn * data, TOut* output, TOut* buffer) {
extern __shared__ unsigned char shared_memory_[];
TOut* shared_memory = reinterpret_cast<TOut*>(shared_memory_);
// Thread-level indexes:
// Linear index of thread in block.
const int tid_in_block = threadIdx.y * blockDim.x + threadIdx.x;
// Total number of threads in a 2-D block.
const int num_threads_in_block = blockDim.x * blockDim.y;
// Warp-level indexes:
// Warp index of thread.
const int wid_in_block = tid_in_block / NUM_THREADS_PER_WARP;
// Lane index of thread.
const int lid_in_block = tid_in_block % NUM_THREADS_PER_WARP;
// Warp count per block.
const int num_warps_in_block = num_threads_in_block / NUM_THREADS_PER_WARP;
// Grid-level indexes:
// Linear index of block in grid.
const int bid_in_grid = blockIdx.x + blockIdx.y * gridDim.x;
// Linear index of thread in grid.
const int tid_in_grid = bid_in_grid * (blockDim.x * blockDim.y) + tid_in_block;
// Total number of blocks in a 2-D grid.
const int num_blocks_in_grid = gridDim.x * gridDim.y;
// Total number of threads in a 2-D grid with 2-D blocks.
const int num_threads_in_grid = num_blocks_in_grid * num_threads_in_block;
// Thread-level reduction (storage change: global memory -> register).
// One thread reduces NUM_ELEMENTS_PER_THREAD elements to a thread register
// in one iteration.
TOut value = 0;
for (int id = tid_in_grid; id < size; id += NUM_ELEMENTS_PER_THREAD * num_threads_in_grid) {
TOut v[NUM_ELEMENTS_PER_THREAD];
#pragma unroll
for (int i = 0; i < NUM_ELEMENTS_PER_THREAD; i++) {
int offset = id + i * num_threads_in_grid;
if (offset < size) {
v[i] = TOut(TOp()(data[offset]));
} else {
v[i] = TOut(0.0f);
}
}
#pragma unroll
for (int i = 0; i < NUM_ELEMENTS_PER_THREAD; i++) {
value += v[i];
}
}
#if __CUDA_ARCH__ >= 700
__syncwarp();
#else
__syncthreads();
#endif
// Warp-level reduction (storage change: register -> register).
// The values in a warp will be summed up to a scalar. After warp-level
// reduction, each block holds num_warps_in_block values in the shared memory.
TOut value_ = value;
#pragma unroll
for (int stride = NUM_THREADS_PER_WARP / 2; stride > 0; stride /= 2) {
value_ += WARP_SHFL_DOWN(value_, stride);
}
// Return early if only one warp is used for reduction.
// Given a fixed number of threads, we prefer threads over warps over blocks so that we never have cases such as
// 1. two blocks and each of them has only 1 warp (32 threads).
// 2. two warps and each of them has only 2 threads.
if (num_warps_in_block == 1) {
if (tid_in_grid == 0) {
// The compile-time if-else branch controlled by the template argument can be
// optimized out, so there is no branch in the real computation phase.
if (DivideResultBySize) {
output[0] = TFinalOp()(value_ / TOut(size));
} else {
output[0] = TFinalOp()(value_);
}
}
return;
}
if (lid_in_block == 0) {
shared_memory[wid_in_block] = value_;
}
__syncthreads();
// Block-level reduction (storage change: shared memory -> global memory).
// The values in a block will be summed up to a scalar.
// Note that the values are stored in the shared memory.
// The stride loop below starts at NUM_WARPS_PER_BLOCK / 2 and keeps halving;
// this is safe because num_warps_in_block never exceeds NUM_WARPS_PER_BLOCK
// and the bound check inside the loop skips out-of-range additions. The
// shared memory holds exactly one partial sum per warp.
#pragma unroll
for (int stride = NUM_WARPS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (tid_in_block + stride < num_warps_in_block) {
shared_memory[tid_in_block] += shared_memory[tid_in_block + stride];
}
__syncthreads();
}
// Return early if only one block is used for reduction.
if (num_blocks_in_grid == 1) {
if (tid_in_grid == 0) {
// The compile-time if-else branch controlled by the template argument can be
// optimized out, so there is no branch in the real computation phase.
if (DivideResultBySize) {
output[0] = TFinalOp()(shared_memory[0] / TOut(size));
} else {
output[0] = TFinalOp()(shared_memory[0]);
}
}
return;
}
if (tid_in_block == 0) {
buffer[bid_in_grid] = shared_memory[0];
}
__threadfence();
__syncthreads();
// Grid-level reduction. We use the last block to sum up values
// stored in the global buffer.
__shared__ bool is_last_block_done;
if (tid_in_block == 0) {
int* p_lock = reinterpret_cast<int*>(buffer + num_blocks_in_grid);
int count = atomicAdd(p_lock, 1);
is_last_block_done = (count == (num_blocks_in_grid - 1));
}
// All threads in each block see if they belong to the last active block
// (i.e., the value of is_last_block_done).
__syncthreads();
// Only the block that saw count equal to num_blocks_in_grid - 1
// enters the following branch.
if (is_last_block_done) {
const int pow2_bound = least_pow2_bound(num_blocks_in_grid);
for (int stride = pow2_bound / 2; stride > 0; stride /= 2) {
if (tid_in_block < stride && tid_in_block + stride < num_blocks_in_grid) {
buffer[tid_in_block] += buffer[tid_in_block + stride];
}
__syncthreads();
}
// The first thread in the last block assigns the final output.
if (tid_in_block == 0) {
// The compile-time if-else branch controlled by the template argument can be
// optimized out, so there is no branch in the real computation phase.
if (DivideResultBySize) {
output[0] = TFinalOp()(buffer[0] / TOut(size));
} else {
output[0] = TFinalOp()(buffer[0]);
}
}
}
}
template<typename TIn, typename TOut, typename TOp, typename TFinalOp, bool DivideResultBySize>
void call_reduce_all_kernel(const TIn *data, TOut *output, int size, TOut *buffer)
{
const auto block_size = compute_block_size(size);
const int num_blocks = compute_grid_size(size);
const dim3 block(block_size.first, block_size.second, 1);
const dim3 grid(num_blocks, 1, 1);
// If more than one block is used, then inter-block reduction is needed.
if (num_blocks != 1) {
cudaMemset(buffer + num_blocks, 0, sizeof(int));
}
const int shared_mem_size = sizeof(TOut) * block_size.first * block_size.second / NUM_THREADS_PER_WARP;
reduce_all_kernel<TIn, TOut, TOp, TFinalOp, DivideResultBySize><<<grid, block, shared_mem_size>>>(size, data, output, buffer);
}
template<typename TIn, typename TOut>
void reduce_sum(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Cast<TOut, TIn>, Identity<TOut>, false>(
data, output, size, buffer);
}
template<typename TIn, typename TOut>
void reduce_square_sum(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Square<TOut, TIn>, Identity<TOut>, false>(
data, output, size, buffer);
}
template<typename TIn, typename TOut>
void reduce_l2_norm(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Square<TOut, TIn>, Sqrt<TOut>, false>(
data, output, size, buffer);
}
template<typename TIn, typename TOut>
void reduce_mean(const TIn* data, TOut* output, int size, TOut* buffer) {
call_reduce_all_kernel<TIn, TOut, Cast<TOut, TIn>, Identity<TOut>, true>(
data, output, size, buffer);
}
template void reduce_sum<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_sum<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_sum<double, double>(
const double* data, double* output, int size, double* buffer);
template void reduce_square_sum<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_square_sum<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_square_sum<double, double>(
const double* data, double* output, int size, double* buffer);
template void reduce_l2_norm<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_l2_norm<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_l2_norm<double, double>(
const double* data, double* output, int size, double* buffer);
template void reduce_mean<half, float>(
const half* data, float* output, int size, float* buffer);
template void reduce_mean<float, float>(
const float* data, float* output, int size, float* buffer);
template void reduce_mean<double, double>(
const double* data, double* output, int size, double* buffer);
bool is_matrix_row_reduction(
const cudnnReduceTensorOp_t cudnn_reduce_op,
const int m,
const int n,
const size_t rank,
std::vector<int64_t> axes) {
if (m < 1)
return false;
if (n < 1)
return false;
if (rank < 2)
return false;
if (cudnn_reduce_op != CUDNN_REDUCE_TENSOR_ADD)
return false;
// Check if all but the last axis are reduced. For example, reducing
// [N, C, H, W]-tensor to [W]-tensor can pass these two checks but reducing
// [N, C]-tensor to [N, 1]-tensor cannot.
if (axes.size() != rank - 1)
return false;
// The last reduced axis should be the second last axis. For
// [N, C, H, W]-input, the sorted axes should be [0, 1, 2].
std::sort(axes.begin(), axes.end());
if (axes.back() != rank - 2)
return false;
return true;
}
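// Each block accumulates a tile of rows into a blockDim.y-by-blockDim.x shared buffer,
// reduces it to a single row, and atomically adds that row into the output.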
template<typename TIn, typename TOut, typename TBuf>
__global__ void reduce_matrix_rows_kernel(const TIn *input, TOut *output, int m, int n) {
constexpr int x_load_count_per_thread = 1;
constexpr int y_load_count_per_thread = 4;
const int t_count_x_in_grid = blockDim.x * gridDim.x;
const int t_count_y_in_grid = blockDim.y * gridDim.y;
const int x_grid_stride = t_count_x_in_grid * x_load_count_per_thread;
const int y_grid_stride = t_count_y_in_grid * y_load_count_per_thread;
const int tid_x_in_grid = threadIdx.x + blockDim.x * blockIdx.x;
const int tid_y_in_grid = threadIdx.y + blockDim.y * blockIdx.y;
const int tid_in_block = threadIdx.x + blockDim.x * threadIdx.y;
// Shape is blockDim.y-by-blockDim.x and element type is TBuf.
extern __shared__ unsigned char shared_memory_[];
TBuf *shared_memory = reinterpret_cast<TBuf*>(shared_memory_);
for (int col = tid_x_in_grid; col < n; col += x_grid_stride) {
shared_memory[tid_in_block] = TBuf(0.0f);
// This loop loads multiple blockDim.y-by-blockDim.x sub-tensors from the input.
for (int row = tid_y_in_grid; row < m; row += y_grid_stride) {
TBuf sum = 0.0f;
// Thread-level reduction. Each thread loads y_load_count_per_thread values
// and aggregates them.
#pragma unroll(y_load_count_per_thread)
for (int row_inner = 0; row_inner < y_load_count_per_thread; ++row_inner) {
int row_final = row + row_inner * t_count_y_in_grid;
int col_final = col;
if (row_final < m && col_final < n) {
sum += TBuf(input[row_final * n + col_final]);
}
}
// Write thread-level reduction result into shared memory.
shared_memory[tid_in_block] += sum;
}
// Wait for all threads to finish their thread-level reductions.
__syncthreads();
// This loop conducts reduction on elements stored in shared memory.
// Each block reduces a blockDim.y-by-blockDim.x tile to a 1-by-blockDim.x row.
#pragma unroll(4)
for (int stride = blockDim.y / 2; stride > 0; stride /= 2) {
if (threadIdx.y < stride) {
shared_memory[tid_in_block] += shared_memory[tid_in_block + stride * blockDim.x];
}
__syncthreads();
}
if (threadIdx.y == 0) {
atomic_add(output + col, TOut(shared_memory[threadIdx.x]));
}
// Make sure all values in shared memory have been written into the output memory.
__syncthreads();
}
}
// This function reduces the given input tensor along all but the last axis.
// For example, an [N, C, H, W]-tensor may lead to an output [W]-tensor.
// Its implementation is in reduction_ops.cu and it is called from reduction_ops.cc.
template<typename TIn, typename TOut, typename TBuf>
void call_reduce_matrix_rows(const TIn *input, TOut *output, int m, int n) {
constexpr int max_num_threads_in_block = 512;
constexpr int max_num_blocks_in_grid = 512;
constexpr int load_count_per_thread = 4;
const int block_x_dim = least_pow2_bound(std::max(1, std::min(n, GPU_WARP_SIZE)));
const int block_y_dim = least_pow2_bound(std::max(1, std::min(max_num_threads_in_block / block_x_dim, m / load_count_per_thread)));
const int grid_x_dim = std::max(1, std::min(n / block_x_dim, max_num_blocks_in_grid));
const int grid_y_dim = std::max(1, std::min(max_num_blocks_in_grid / grid_x_dim, m / block_y_dim / 4));
const dim3 grid(grid_x_dim, grid_y_dim, 1);
const dim3 block(block_x_dim, block_y_dim, 1);
reduce_matrix_rows_kernel<TIn, TOut, TBuf><<<grid, block, block.y * block.x * sizeof(TBuf)>>>(
input, output, m, n);
}
template<typename TIn, typename TOut>
void reduce_matrix_rows(const TIn* data, TOut* output, int m, int n)
{
call_reduce_matrix_rows<TIn, TOut, TOut>(data, output, m, n);
}
template<> void reduce_matrix_rows<half, half>(const half* data, half* output, int m, int n)
{
call_reduce_matrix_rows<half, half, float>(data, output, m, n);
}
template void reduce_matrix_rows<float, float>(
const float* data, float* output, int m, int n);
template void reduce_matrix_rows<double, double>(
const double* data, double* output, int m, int n);
} // namespace cuda
} // namespace onnxruntime
|
72050dd7a3ac623b04fae1df952c1e9771509f3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
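// Each thread accumulates 8 results: row offsets i, i+32, i+64, i+96 for columns j and j+1,
// computing a[i + pitch_a*j] = sum over k of b[j + pitch_b*k] * c[k*pitch_c + i].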
int i = blockIdx.x*128 + threadIdx.x;
int j = blockIdx.y*2;
float sum0 = 0.0, sum1 = 0.0, sum2 = 0.0, sum3 = 0.0;
float sum4 = 0.0, sum5 = 0.0, sum6 = 0.0, sum7 = 0.0;
for( int k = 0; k < p; ++k )
{
float b_tmp0 = b[j+pitch_b*k];
float b_tmp1 = b[j+1+pitch_b*k];
sum0 += b_tmp0*c[k*pitch_c+i];
sum1 += b_tmp0*c[k*pitch_c + i + 32];
sum2 += b_tmp0*c[k*pitch_c + i + 64];
sum3 += b_tmp0*c[k*pitch_c + i + 96];
sum4 += b_tmp1*c[k*pitch_c+i];
sum5 += b_tmp1*c[k*pitch_c + i + 32];
sum6 += b_tmp1*c[k*pitch_c + i + 64];
sum7 += b_tmp1*c[k*pitch_c + i + 96];
}
a[i+pitch_a*j] = sum0;
a[i+32+pitch_a*j] = sum1;
a[i+64+pitch_a*j] = sum2;
a[i+96+pitch_a*j] = sum3;
a[i+pitch_a*(j+1)] = sum4;
a[i+32+pitch_a*(j+1)] = sum5;
a[i+64+pitch_a*(j+1)] = sum6;
a[i+96+pitch_a*(j+1)] = sum7;
}
| 72050dd7a3ac623b04fae1df952c1e9771509f3f.cu | extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
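// Each thread accumulates 8 results: row offsets i, i+32, i+64, i+96 for columns j and j+1,
// computing a[i + pitch_a*j] = sum over k of b[j + pitch_b*k] * c[k*pitch_c + i].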
int i = blockIdx.x*128 + threadIdx.x;
int j = blockIdx.y*2;
float sum0 = 0.0, sum1 = 0.0, sum2 = 0.0, sum3 = 0.0;
float sum4 = 0.0, sum5 = 0.0, sum6 = 0.0, sum7 = 0.0;
for( int k = 0; k < p; ++k )
{
float b_tmp0 = b[j+pitch_b*k];
float b_tmp1 = b[j+1+pitch_b*k];
sum0 += b_tmp0*c[k*pitch_c+i];
sum1 += b_tmp0*c[k*pitch_c + i + 32];
sum2 += b_tmp0*c[k*pitch_c + i + 64];
sum3 += b_tmp0*c[k*pitch_c + i + 96];
sum4 += b_tmp1*c[k*pitch_c+i];
sum5 += b_tmp1*c[k*pitch_c + i + 32];
sum6 += b_tmp1*c[k*pitch_c + i + 64];
sum7 += b_tmp1*c[k*pitch_c + i + 96];
}
a[i+pitch_a*j] = sum0;
a[i+32+pitch_a*j] = sum1;
a[i+64+pitch_a*j] = sum2;
a[i+96+pitch_a*j] = sum3;
a[i+pitch_a*(j+1)] = sum4;
a[i+32+pitch_a*(j+1)] = sum5;
a[i+64+pitch_a*(j+1)] = sum6;
a[i+96+pitch_a*(j+1)] = sum7;
}
|
57242aa46a25fbc5389290ca1cd5378f34c625d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatrixAddition.h"
#include "GPUErrors.h"
//2D Kernel
__global__ void MatrixAddition2DG2DB(float* g_A, float* g_B, float* g_C, const int ny, const int nx)
{
int ix = threadIdx.x + (blockIdx.x * blockDim.x); //Index of thread along x direction
int iy = threadIdx.y + (blockIdx.y * blockDim.y);
//Linear or Scalar index of thread
int idx = iy * nx + ix;
if (ix < nx && iy < ny)
{
g_C[idx] = g_A[idx] + g_B[idx];
}
}
//GPU Host Function
__host__ void MatrixAdditionOnGPU2DG2DB(float* h_A, float* h_B, float* h_C, float* ref, const int ny, const int nx)
{
float* d_A, * d_B, * d_C;
const int MatrixSizeInBytes = ny * nx * sizeof(float);
hipEvent_t kernel_start;
hipEvent_t kernel_stop;
float fElapsedTime;
HandleCUDAError(hipEventCreate(&kernel_start));
HandleCUDAError(hipEventCreate(&kernel_stop));
//Allocate device memory on the global memory
HandleCUDAError(hipMalloc((void**)&d_A, MatrixSizeInBytes));
HandleCUDAError(hipMalloc((void**)&d_B, MatrixSizeInBytes));
HandleCUDAError(hipMalloc((void**)&d_C, MatrixSizeInBytes));
//transfer data from CPU Memory to GPU Memory
chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
HandleCUDAError(hipMemcpy(d_A, h_A, MatrixSizeInBytes, hipMemcpyHostToDevice));
HandleCUDAError(hipMemcpy(d_B, h_B, MatrixSizeInBytes, hipMemcpyHostToDevice));
end = std::chrono::system_clock::now();
std::chrono::duration<double> elasped_seconds = end - start;
cout << "Memory Copy - HostToDevice: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
//Kernel Invoke Parameters - 2D Grid and 2D Blocks
int dimx = 32;
int dimy = 32;
dim3 block(dimx,dimy); //creates a 2d block
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
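// One 32x32 thread block per 32x32 tile of the matrix; the bounds check in the kernel handles partial tiles at the edges.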
cout << "2D Grid Dimension" << endl;
cout << "\tNumber of Block along X dimension: " << grid.x << endl;
cout << "\tNumber of Block along Y dimension: " << grid.y << endl;
cout << "2D Block Dimension" << endl;
cout << "\tNumber of threads along X dimension: " << block.x << endl;
cout << "\tNumber of threads along Y dimension: " << block.y << endl;
HandleCUDAError(hipEventRecord(kernel_start));
MatrixAddition2DG2DB << <grid, block >> > (d_A, d_B, d_C, ny, nx); // pass (ny, nx) to match the kernel's parameter order
HandleCUDAError(hipEventRecord(kernel_stop));
HandleCUDAError(hipEventSynchronize(kernel_stop));
GetCUDARunTimeError();
HandleCUDAError(hipEventElapsedTime(&fElapsedTime, kernel_start, kernel_stop));
start = std::chrono::system_clock::now();
HandleCUDAError(hipMemcpy(h_C, d_C, MatrixSizeInBytes, hipMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
elasped_seconds = end - start;
MatrixAdditionVerification(ref, h_C, ny, nx);
cout << "Memory Copy - DeviceToHost: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
cout << "2DG2DB Elapsed Time (GPU) = " << fElapsedTime << " msecs" << endl;
HandleCUDAError(hipFree(d_A));
HandleCUDAError(hipFree(d_B));
HandleCUDAError(hipFree(d_C));
HandleCUDAError(hipEventDestroy(kernel_start));
HandleCUDAError(hipEventDestroy(kernel_stop));
HandleCUDAError(hipDeviceReset());
} | 57242aa46a25fbc5389290ca1cd5378f34c625d4.cu | #include "MatrixAddition.h"
#include "GPUErrors.h"
//2D Kernel
__global__ void MatrixAddition2DG2DB(float* g_A, float* g_B, float* g_C, const int ny, const int nx)
{
int ix = threadIdx.x + (blockIdx.x * blockDim.x); //Index of thread along x direction
int iy = threadIdx.y + (blockIdx.y * blockDim.y);
//Linear or Scalar index of thread
int idx = iy * nx + ix;
if (ix < nx && iy < ny)
{
g_C[idx] = g_A[idx] + g_B[idx];
}
}
//GPU Host Function
__host__ void MatrixAdditionOnGPU2DG2DB(float* h_A, float* h_B, float* h_C, float* ref, const int ny, const int nx)
{
float* d_A, * d_B, * d_C;
const int MatrixSizeInBytes = ny * nx * sizeof(float);
cudaEvent_t kernel_start;
cudaEvent_t kernel_stop;
float fElapsedTime;
HandleCUDAError(cudaEventCreate(&kernel_start));
HandleCUDAError(cudaEventCreate(&kernel_stop));
//Allocate device memory on the global memory
HandleCUDAError(cudaMalloc((void**)&d_A, MatrixSizeInBytes));
HandleCUDAError(cudaMalloc((void**)&d_B, MatrixSizeInBytes));
HandleCUDAError(cudaMalloc((void**)&d_C, MatrixSizeInBytes));
//transfer data from CPU Memory to GPU Memory
chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
HandleCUDAError(cudaMemcpy(d_A, h_A, MatrixSizeInBytes, cudaMemcpyHostToDevice));
HandleCUDAError(cudaMemcpy(d_B, h_B, MatrixSizeInBytes, cudaMemcpyHostToDevice));
end = std::chrono::system_clock::now();
std::chrono::duration<double> elasped_seconds = end - start;
cout << "Memory Copy - HostToDevice: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
//Kernel Invoke Parameters - 2D Grid and 2D Blocks
int dimx = 32;
int dimy = 32;
dim3 block(dimx,dimy); //creates a 2d block
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
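// One 32x32 thread block per 32x32 tile of the matrix; the bounds check in the kernel handles partial tiles at the edges.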
cout << "2D Grid Dimension" << endl;
cout << "\tNumber of Block along X dimension: " << grid.x << endl;
cout << "\tNumber of Block along Y dimension: " << grid.y << endl;
cout << "2D Block Dimension" << endl;
cout << "\tNumber of threads along X dimension: " << block.x << endl;
cout << "\tNumber of threads along Y dimension: " << block.y << endl;
HandleCUDAError(cudaEventRecord(kernel_start));
MatrixAddition2DG2DB << <grid, block >> > (d_A, d_B, d_C, ny, nx); // pass (ny, nx) to match the kernel's parameter order
HandleCUDAError(cudaEventRecord(kernel_stop));
HandleCUDAError(cudaEventSynchronize(kernel_stop));
GetCUDARunTimeError();
HandleCUDAError(cudaEventElapsedTime(&fElapsedTime, kernel_start, kernel_stop));
start = std::chrono::system_clock::now();
HandleCUDAError(cudaMemcpy(h_C, d_C, MatrixSizeInBytes, cudaMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
elasped_seconds = end - start;
MatrixAdditionVerification(ref, h_C, ny, nx);
cout << "Memory Copy - DeviceToHost: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
cout << "2DG2DB Elapsed Time (GPU) = " << fElapsedTime << " msecs" << endl;
HandleCUDAError(cudaFree(d_A));
HandleCUDAError(cudaFree(d_B));
HandleCUDAError(cudaFree(d_C));
HandleCUDAError(cudaEventDestroy(kernel_start));
HandleCUDAError(cudaEventDestroy(kernel_stop));
HandleCUDAError(cudaDeviceReset());
} |
515c6d266c7067a1b04afdcb435080ac13bc8e55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/normalize_layer.hpp"
namespace caffe {
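// NormalizeLayer GPU kernels: the forward pass scales each spatial position's channel
// vector by the inverse of its L1/L2 norm; the backward pass applies the matching
// chain-rule correction using the cached inverse norms.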
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, Dtype epsilon,
const Dtype* data, Dtype* norm_data) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
norm_data[index] = sum + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scale(const int num, const int channels, const int spatial_dim,
const Dtype* data, const Dtype* norm_data,
Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = data[index] * norm_data[n * spatial_dim + s];
}
}
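// In-place variant of kernel_channel_scale: multiplies input_output_data by the per-position scale factor.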
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int channels, const int spatial_dim,
const Dtype* norm_data, Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
input_output_data[index] *= norm_data[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* square_data = squared_.mutable_gpu_data();
Dtype* norm_data = norm_.mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
if (normalize_type_ == "L2") {
caffe_gpu_powx(num*channels*spatial_dim, bottom_data, Dtype(2), square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-12, square_data, norm_data);
caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(-0.5), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom_data, norm_data, top_data);
}
else if (normalize_type_ == "L1") {
caffe_gpu_abs(num*channels*spatial_dim, bottom_data, square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-6, square_data, norm_data);
caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(-1), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom_data, norm_data, top_data);
}
else {
NOT_IMPLEMENTED;
}
}
template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* square_data = squared_.gpu_data();
const Dtype* norm_data = norm_.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* norm_diff = norm_.mutable_gpu_diff();
int num = top[0]->num();
int channels = top[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, top_data, top_diff, norm_diff);
if (normalize_type_ == "L2") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, top_data, norm_diff, bottom_diff);
}
else if (normalize_type_ == "L1") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, square_data, norm_diff, bottom_diff);
}
else {
NOT_IMPLEMENTED;
}
caffe_gpu_sub(num * channels * spatial_dim, top_diff, bottom_diff, bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, norm_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer);
} // namespace caffe | 515c6d266c7067a1b04afdcb435080ac13bc8e55.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/normalize_layer.hpp"
namespace caffe {
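// NormalizeLayer GPU kernels: the forward pass scales each spatial position's channel
// vector by the inverse of its L1/L2 norm; the backward pass applies the matching
// chain-rule correction using the cached inverse norms.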
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, Dtype epsilon,
const Dtype* data, Dtype* norm_data) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
norm_data[index] = sum + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scale(const int num, const int channels, const int spatial_dim,
const Dtype* data, const Dtype* norm_data,
Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = data[index] * norm_data[n * spatial_dim + s];
}
}
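// In-place variant of kernel_channel_scale: multiplies input_output_data by the per-position scale factor.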
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int channels, const int spatial_dim,
const Dtype* norm_data, Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
input_output_data[index] *= norm_data[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* square_data = squared_.mutable_gpu_data();
Dtype* norm_data = norm_.mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
if (normalize_type_ == "L2") {
caffe_gpu_powx(num*channels*spatial_dim, bottom_data, Dtype(2), square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-12, square_data, norm_data);
caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(-0.5), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom_data, norm_data, top_data);
}
else if (normalize_type_ == "L1") {
caffe_gpu_abs(num*channels*spatial_dim, bottom_data, square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-6, square_data, norm_data);
caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(-1), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom_data, norm_data, top_data);
}
else {
NOT_IMPLEMENTED;
}
}
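// Backward pass: bottom_diff = (top_diff - d * <top_data, top_diff>) * norm,
// where d is top_data for L2 normalization and |bottom_data| for L1.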
template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* square_data = squared_.gpu_data();
const Dtype* norm_data = norm_.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* norm_diff = norm_.mutable_gpu_diff();
int num = top[0]->num();
int channels = top[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, top_data, top_diff, norm_diff);
if (normalize_type_ == "L2") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, top_data, norm_diff, bottom_diff);
}
else if (normalize_type_ == "L1") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, square_data, norm_diff, bottom_diff);
}
else {
NOT_IMPLEMENTED;
}
caffe_gpu_sub(num * channels * spatial_dim, top_diff, bottom_diff, bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, norm_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer);
} // namespace caffe |
7d76218691c860fbb8fac3b64924b834650e2521.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define CUDA_CALL(err) __cudaSafeCall(err, __FILE__, __LINE__)
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__, __LINE__)
/**************************************
* void __cudaSafeCall(hipError_t err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
inline void __cudaSafeCall(hipError_t err, const char* file, const int line) {
#ifdef __DEBUG
#pragma warning(push)
#pragma warning(disable : 4127) // Prevent warning on do-while(0);
do {
if (hipSuccess != err) {
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
exit(-1);
}
} while (0);
#pragma warning(pop)
#endif // __DEBUG
return;
}
inline void __cudaCheckError(const char* file, const int line) {
#ifdef __DEBUG
#pragma warning(push)
#pragma warning(disable : 4127) // Prevent warning on do-while(0);
do {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s.\n", file, line, hipGetErrorString(err));
exit(-1);
}
// More careful checking. However, this will affect performance.
// Comment if not needed.
/*err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}*/
} while (0);
#pragma warning(pop)
#endif // __DEBUG
return;
}
int tpdt(double* t, double dt, double end_time);
__device__ double f_gpu(double p, double t) { return -expf(-TSCALE * t) * p; }
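/* Nine-point stencil update for the 2D wave equation:
   un = 2*uc - uo + VSQR*dt^2 * (discrete Laplacian of uc / h^2 + f(pebbles, t)),
   with the outermost rows and columns clamped to zero. */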
__global__ void evolve_gpu(double* un, double* uc, double* uo, double* pebbles, int n, double h, double dt, double t)
{
// per-thread indices: each thread works on its own grid cell
int i, j, idx;
idx = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x + threadIdx.x);
i = idx / n;
j = idx % n;
if (i == 0 || i == n - 1 || j == 0 || j == n - 1)
un[idx] = 0.;
else
un[idx] = 2 * uc[idx] - uo[idx] + VSQR * (dt * dt) * ((uc[idx - 1] + uc[idx + 1] + uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx - 1 - n] + uc[idx - 1 + n] + uc[idx + 1 - n] + uc[idx + 1 + n]) - 5 * uc[idx]) / (h * h) + f_gpu(pebbles[idx], t));
}
void run_gpu(double* u, double* u0, double* u1, double* pebbles, int n, double h, double end_time, int nthreads) {
hipEvent_t kstart, kstop;
float ktime;
/* HW2: Define your local variables here */
// declare the variables
double* gpu_uc;
double* gpu_uo;
double* gpu_un;
double* gpu_pebbles;
/* Set up device timers */
CUDA_CALL(hipSetDevice(0));
CUDA_CALL(hipEventCreate(&kstart));
CUDA_CALL(hipEventCreate(&kstop));
/* HW2: Add CUDA kernel call preparation code here */
// malloc mem on GPU and copy the content
hipMalloc((void**)&gpu_uc, sizeof(double) * n * n);
hipMalloc((void**)&gpu_uo, sizeof(double) * n * n);
hipMalloc((void**)&gpu_un, sizeof(double) * n * n);
hipMalloc((void**)&gpu_pebbles, sizeof(double) * n * n);
hipMemcpy((void*)gpu_uo, (void*)u0, sizeof(double) * n * n, hipMemcpyHostToDevice);
hipMemcpy((void*)gpu_uc, (void*)u1, sizeof(double) * n * n, hipMemcpyHostToDevice);
hipMemcpy((void*)gpu_pebbles, (void*)pebbles, sizeof(double) * n * n, hipMemcpyHostToDevice);
double t = 0., dt = h / 2.;
int grid_size = n / nthreads;
int block_size = nthreads;
dim3 grid(grid_size, grid_size);
dim3 block(block_size, block_size);
/* Start GPU computation timer */
CUDA_CALL(hipEventRecord(kstart, 0));
/* HW2: Add main lake simulation loop here */
// do the calculation
while (1) {
hipLaunchKernelGGL(( evolve_gpu), dim3(grid), dim3(block), 0, 0, gpu_un, gpu_uc, gpu_uo, gpu_pebbles, n, h, dt, t);
hipMemcpy((void*)gpu_uo, (void*)gpu_uc, sizeof(double) * n * n, hipMemcpyDeviceToDevice);
hipMemcpy((void*)gpu_uc, (void*)gpu_un, sizeof(double) * n * n, hipMemcpyDeviceToDevice);
if (!tpdt(&t, dt, end_time))
break;
}
hipMemcpy((void*)u, (void*)gpu_un, sizeof(double) * n * n, hipMemcpyDeviceToHost);
/* Stop GPU computation timer */
CUDA_CALL(hipEventRecord(kstop, 0));
CUDA_CALL(hipEventSynchronize(kstop));
CUDA_CALL(hipEventElapsedTime(&ktime, kstart, kstop));
printf("GPU computation: %f msec\n", ktime);
/* HW2: Add post CUDA kernel call processing and cleanup here */
/* timer cleanup */
CUDA_CALL(hipEventDestroy(kstart));
CUDA_CALL(hipEventDestroy(kstop));
}
| 7d76218691c860fbb8fac3b64924b834650e2521.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define CUDA_CALL(err) __cudaSafeCall(err, __FILE__, __LINE__)
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__, __LINE__)
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
inline void __cudaSafeCall(cudaError err, const char* file, const int line) {
#ifdef __DEBUG
#pragma warning(push)
#pragma warning(disable : 4127) // Prevent warning on do-while(0);
do {
if (cudaSuccess != err) {
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
exit(-1);
}
} while (0);
#pragma warning(pop)
#endif // __DEBUG
return;
}
inline void __cudaCheckError(const char* file, const int line) {
#ifdef __DEBUG
#pragma warning(push)
#pragma warning(disable : 4127) // Prevent warning on do-while(0);
do {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s.\n", file, line, cudaGetErrorString(err));
exit(-1);
}
// More careful checking. However, this will affect performance.
// Comment if not needed.
/*err = cudaThreadSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
} while (0);
#pragma warning(pop)
#endif // __DEBUG
return;
}
int tpdt(double* t, double dt, double end_time);
__device__ double f_gpu(double p, double t) { return -expf(-TSCALE * t) * p; }
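/* Nine-point stencil update for the 2D wave equation:
   un = 2*uc - uo + VSQR*dt^2 * (discrete Laplacian of uc / h^2 + f(pebbles, t)),
   with the outermost rows and columns clamped to zero. */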
__global__ void evolve_gpu(double* un, double* uc, double* uo, double* pebbles, int n, double h, double dt, double t)
{
// per-thread indices: each thread works on its own grid cell
int i, j, idx;
idx = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x + threadIdx.x);
i = idx / n;
j = idx % n;
if (i == 0 || i == n - 1 || j == 0 || j == n - 1)
un[idx] = 0.;
else
un[idx] = 2 * uc[idx] - uo[idx] + VSQR * (dt * dt) * ((uc[idx - 1] + uc[idx + 1] + uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx - 1 - n] + uc[idx - 1 + n] + uc[idx + 1 - n] + uc[idx + 1 + n]) - 5 * uc[idx]) / (h * h) + f_gpu(pebbles[idx], t));
}
void run_gpu(double* u, double* u0, double* u1, double* pebbles, int n, double h, double end_time, int nthreads) {
cudaEvent_t kstart, kstop;
float ktime;
/* HW2: Define your local variables here */
// declare the variables
double* gpu_uc;
double* gpu_uo;
double* gpu_un;
double* gpu_pebbles;
/* Set up device timers */
CUDA_CALL(cudaSetDevice(0));
CUDA_CALL(cudaEventCreate(&kstart));
CUDA_CALL(cudaEventCreate(&kstop));
/* HW2: Add CUDA kernel call preparation code here */
// malloc mem on GPU and copy the content
cudaMalloc((void**)&gpu_uc, sizeof(double) * n * n);
cudaMalloc((void**)&gpu_uo, sizeof(double) * n * n);
cudaMalloc((void**)&gpu_un, sizeof(double) * n * n);
cudaMalloc((void**)&gpu_pebbles, sizeof(double) * n * n);
cudaMemcpy((void*)gpu_uo, (void*)u0, sizeof(double) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy((void*)gpu_uc, (void*)u1, sizeof(double) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy((void*)gpu_pebbles, (void*)pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice);
double t = 0., dt = h / 2.;
int grid_size = n / nthreads;
int block_size = nthreads;
dim3 grid(grid_size, grid_size);
dim3 block(block_size, block_size);
/* Start GPU computation timer */
CUDA_CALL(cudaEventRecord(kstart, 0));
/* HW2: Add main lake simulation loop here */
// do the calculation
while (1) {
evolve_gpu<<<grid, block>>>(gpu_un, gpu_uc, gpu_uo, gpu_pebbles, n, h, dt, t);
cudaMemcpy((void*)gpu_uo, (void*)gpu_uc, sizeof(double) * n * n, cudaMemcpyDeviceToDevice);
cudaMemcpy((void*)gpu_uc, (void*)gpu_un, sizeof(double) * n * n, cudaMemcpyDeviceToDevice);
if (!tpdt(&t, dt, end_time))
break;
}
cudaMemcpy((void*)u, (void*)gpu_un, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
/* Stop GPU computation timer */
CUDA_CALL(cudaEventRecord(kstop, 0));
CUDA_CALL(cudaEventSynchronize(kstop));
CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
printf("GPU computation: %f msec\n", ktime);
/* HW2: Add post CUDA kernel call processing and cleanup here */
/* timer cleanup */
CUDA_CALL(cudaEventDestroy(kstart));
CUDA_CALL(cudaEventDestroy(kstop));
}
|
82dcddd8da9f10c4d819495d86756f45a32cfff6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// @file rdwt53.cu
/// @brief CUDA implementation of reverse 5/3 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-02-04 14:19
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps shared memory buffer and algorithms needed for computing 5/3 RDWT
/// using sliding window and lifting schema.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class RDWT53 {
private:
/// Shared memory buffer used for 5/3 DWT transforms.
typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> RDWT53Buffer;
/// Shared buffer used for reverse 5/3 DWT.
RDWT53Buffer buffer;
/// Difference between indices of two vertically neighboring items in buffer.
enum { STRIDE = RDWT53Buffer::VERTICAL_STRIDE };
/// Info needed for loading of one input column from input image.
/// @tparam CHECKED true if loader should check boundaries
template <bool CHECKED>
struct RDWT53Column {
/// loader of pixels from column in input image
VerticalDWTBandLoader<int, CHECKED> loader;
/// Offset of corresponding column in shared buffer.
int offset;
/// Sets all fields to some values to avoid 'uninitialized' warnings.
__device__ void clear() {
offset = 0;
loader.clear();
}
};
/// 5/3 DWT reverse update operation.
struct Reverse53Update {
__device__ void operator() (const int p, int & c, const int n) const {
c -= (p + n + 2) / 4; // F.3, page 118, ITU-T Rec. T.800 final draft
}
};
/// 5/3 DWT reverse predict operation.
struct Reverse53Predict {
__device__ void operator() (const int p, int & c, const int n) const {
c += (p + n) / 2; // F.4, page 118, ITU-T Rec. T.800 final draft
}
};
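/// Note: the inverse lifting first undoes the update step (F.3) and then the
/// predict step (F.4), i.e. the reverse order of the forward 5/3 transform.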
/// Horizontal 5/3 RDWT on specified lines of transform buffer.
/// @param lines number of lines to be transformed
/// @param firstLine index of the first line to be transformed
__device__ void horizontalTransform(const int lines, const int firstLine) {
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, Reverse53Update());
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, Reverse53Predict());
__syncthreads();
}
/// Using given loader, it loads another WIN_SIZE_Y coefficients
/// into specified column.
/// @tparam CHECKED true if loader should check image boundaries
/// @param input input coefficients to load from
/// @param col info about loaded column
template <bool CHECKED>
inline __device__ void loadWindowIntoColumn(const int * const input,
RDWT53Column<CHECKED> & col) {
for(int i = 3; i < (3 + WIN_SIZE_Y); i += 2) {
buffer[col.offset + i * STRIDE] = col.loader.loadLowFrom(input);
buffer[col.offset + (i + 1) * STRIDE] = col.loader.loadHighFrom(input);
}
}
/// Initializes one column of shared transform buffer with 3 input pixels.
/// Those 3 pixels will not be transformed. Also initializes given loader.
/// @tparam CHECKED true if loader should check image boundaries
/// @param columnX x coordinate of column in shared transform buffer
/// @param input input image
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param loader (uninitialized) info about loaded column
template <bool CHECKED>
__device__ void initColumn(const int columnX, const int * const input,
const int sizeX, const int sizeY,
RDWT53Column<CHECKED> & column,
const int firstY) {
// coordinates of the first coefficient to be loaded
const int firstX = blockIdx.x * WIN_SIZE_X + columnX;
// offset of the column with index 'colIndex' in the transform buffer
column.offset = buffer.getColumnOffset(columnX);
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 3 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 0 * STRIDE] =
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 1);
buffer[column.offset + 0 * STRIDE] = column.loader.loadHighFrom(input);
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
}
// Now, the next coefficient, which will be loaded by loader, is #2.
}
/// Actual GPU 5/3 RDWT implementation.
/// @tparam CHECKED_LOADS true if boundaries must be checked when reading
/// @tparam CHECKED_WRITES true if boundaries must be checked when writing
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template<bool CHECKED_LOADS, bool CHECKED_WRITES>
__device__ void transform(const int * const in, int * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about one main and one boundary column
RDWT53Column<CHECKED_LOADS> column, boundaryColumn;
// index of first row to be transformed
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
// some threads initialize boundary columns
boundaryColumn.clear();
if(threadIdx.x < 3) {
// First 3 threads also handle boundary columns. Thread #0 gets right
// column #0, thread #1 get right column #1 and thread #2 left column.
const int colId = threadIdx.x + ((threadIdx.x != 2) ? WIN_SIZE_X : -3);
// Thread initializes offset of the boundary column (in shared
// buffer), first 3 pixels of the column and a loader for this column.
initColumn(colId, in, sizeX, sizeY, boundaryColumn, firstY);
}
// All threads initialize central columns.
initColumn(parityIdx<WIN_SIZE_X>(), in, sizeX, sizeY, column, firstY);
// horizontally transform first 3 rows
horizontalTransform(3, 0);
// writer of output pixels - initialize it
const int outX = blockIdx.x * WIN_SIZE_X + threadIdx.x;
VerticalDWTPixelWriter<int, CHECKED_WRITES> writer;
writer.init(sizeX, sizeY, outX, firstY);
// offset of column (in transform buffer) saved by this thread
const int outputColumnOffset = buffer.getColumnOffset(threadIdx.x);
// (Each iteration assumes that first 3 rows of transform buffer are
// already loaded with horizontally transformed pixels.)
for(int w = 0; w < winSteps; w++) {
// Load another WIN_SIZE_Y lines of this thread's column
// into the transform buffer.
loadWindowIntoColumn(in, column);
// possibly load boundary columns
if(threadIdx.x < 3) {
loadWindowIntoColumn(in, boundaryColumn);
}
// horizontally transform all newly loaded lines
horizontalTransform(WIN_SIZE_Y, 3);
// Using 3 registers, remember current values of last 3 rows
// of transform buffer. These rows are transformed horizontally
// only and will be used in next iteration.
int last3Lines[3];
last3Lines[0] = buffer[outputColumnOffset + (WIN_SIZE_Y + 0) * STRIDE];
last3Lines[1] = buffer[outputColumnOffset + (WIN_SIZE_Y + 1) * STRIDE];
last3Lines[2] = buffer[outputColumnOffset + (WIN_SIZE_Y + 2) * STRIDE];
// vertically transform all central columns
buffer.forEachVerticalOdd(outputColumnOffset, Reverse53Update());
buffer.forEachVerticalEven(outputColumnOffset, Reverse53Predict());
// Save all results of current window. Results are in transform buffer
// at rows from #1 to #(1 + WIN_SIZE_Y). Other rows are invalid now.
// (They only served as a boundary for vertical RDWT.)
for(int i = 1; i < (1 + WIN_SIZE_Y); i++) {
writer.writeInto(out, buffer[outputColumnOffset + i * STRIDE]);
}
// Use last 3 remembered lines as first 3 lines for next iteration.
// As expected, these lines are already horizontally transformed.
buffer[outputColumnOffset + 0 * STRIDE] = last3Lines[0];
buffer[outputColumnOffset + 1 * STRIDE] = last3Lines[1];
buffer[outputColumnOffset + 2 * STRIDE] = last3Lines[2];
// Wait for all writing threads before proceeding to loading new
// coefficients in next iteration. (Not to overwrite those which
// are not written yet.)
__syncthreads();
}
}
public:
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
__device__ static void run(const int * const input, int * const output,
const int sx, const int sy, const int steps) {
// prepare instance with buffer in shared memory
__shared__ RDWT53<WIN_SIZE_X, WIN_SIZE_Y> rdwt53;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (1 in next expressions is for radius of impulse response of 5/3 RDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary) {
// near bottom boundary => check both writing and reading
rdwt53.transform<true, true>(input, output, sx, sy, steps);
} else if(atRightBoudary) {
// near right boundary only => check writing only
rdwt53.transform<false, true>(input, output, sx, sy, steps);
} else {
// no nearby boundary => check nothing
rdwt53.transform<false, false>(input, output, sx, sy, steps);
}
}
}; // end of class RDWT53
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(RDWT53<WIN_SX, WIN_SY>), 8))
__global__ void rdwt53Kernel(const int * const in, int * const out,
const int sx, const int sy, const int steps) {
RDWT53<WIN_SX, WIN_SY>::run(in, out, sx, sy, steps);
}
/// Only computes optimal number of sliding window steps,
/// number of threadblocks and then launches the 5/3 RDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchRDWT53Kernel (int * in, int * out, const int sx, const int sy) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// finally transform this level
PERF_BEGIN
hipLaunchKernelGGL(( rdwt53Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps);
PERF_END(" RDWT53", sx, sy)
CudaDWTTester::checkLastKernelCall("RDWT 5/3 kernel");
}
/// Reverse 5/3 2D DWT. See common rules (above) for more details.
/// @param in Input DWT coefficients. Format described in common rules.
/// Will not be preserved (will be overwritten).
/// @param out output buffer on GPU - will contain original image
/// in normalized range [-128, 127].
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
void rdwt53(int * in, int * out, int sizeX, int sizeY, int levels) {
if(levels > 1) {
// let this function recursively reverse transform deeper levels first
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
rdwt53(in, out, llSizeX, llSizeY, levels - 1);
// copy reverse transformed LL band from output back into the input
memCopy(in, out, llSizeX, llSizeY);
}
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchRDWT53Kernel<192, 8>(in, out, sizeX, sizeY);
} else if (sizeX >= 480) {
launchRDWT53Kernel<128, 8>(in, out, sizeX, sizeY);
} else {
launchRDWT53Kernel<64, 8>(in, out, sizeX, sizeY);
}
}
} // end of namespace dwt_cuda
| 82dcddd8da9f10c4d819495d86756f45a32cfff6.cu | ///
/// @file rdwt53.cu
/// @brief CUDA implementation of reverse 5/3 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-02-04 14:19
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps shared memory buffer and algorithms needed for computing 5/3 RDWT
/// using sliding window and lifting schema.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class RDWT53 {
private:
/// Shared memory buffer used for 5/3 DWT transforms.
typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> RDWT53Buffer;
/// Shared buffer used for reverse 5/3 DWT.
RDWT53Buffer buffer;
/// Difference between indices of two vertically neighboring items in buffer.
enum { STRIDE = RDWT53Buffer::VERTICAL_STRIDE };
/// Info needed for loading of one input column from input image.
/// @tparam CHECKED true if loader should check boundaries
template <bool CHECKED>
struct RDWT53Column {
/// loader of pixels from column in input image
VerticalDWTBandLoader<int, CHECKED> loader;
/// Offset of corresponding column in shared buffer.
int offset;
/// Sets all fields to some values to avoid 'uninitialized' warnings.
__device__ void clear() {
offset = 0;
loader.clear();
}
};
/// 5/3 DWT reverse update operation.
struct Reverse53Update {
__device__ void operator() (const int p, int & c, const int n) const {
c -= (p + n + 2) / 4; // F.3, page 118, ITU-T Rec. T.800 final draft
}
};
/// 5/3 DWT reverse predict operation.
struct Reverse53Predict {
__device__ void operator() (const int p, int & c, const int n) const {
c += (p + n) / 2; // F.4, page 118, ITU-T Rec. T.800 final draft
}
};
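/// Note: the inverse lifting first undoes the update step (F.3) and then the
/// predict step (F.4), i.e. the reverse order of the forward 5/3 transform.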
/// Horizontal 5/3 RDWT on specified lines of transform buffer.
/// @param lines number of lines to be transformed
/// @param firstLine index of the first line to be transformed
__device__ void horizontalTransform(const int lines, const int firstLine) {
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, Reverse53Update());
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, Reverse53Predict());
__syncthreads();
}
/// Using given loader, it loads another WIN_SIZE_Y coefficients
/// into specified column.
/// @tparam CHECKED true if loader should check image boundaries
/// @param input input coefficients to load from
/// @param col info about loaded column
template <bool CHECKED>
inline __device__ void loadWindowIntoColumn(const int * const input,
RDWT53Column<CHECKED> & col) {
for(int i = 3; i < (3 + WIN_SIZE_Y); i += 2) {
buffer[col.offset + i * STRIDE] = col.loader.loadLowFrom(input);
buffer[col.offset + (i + 1) * STRIDE] = col.loader.loadHighFrom(input);
}
}
/// Initializes one column of shared transform buffer with 3 input pixels.
/// Those 3 pixels will not be transformed. Also initializes given loader.
/// @tparam CHECKED true if loader should check image boundaries
/// @param columnX x coordinate of column in shared transform buffer
/// @param input input image
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param loader (uninitialized) info about loaded column
template <bool CHECKED>
__device__ void initColumn(const int columnX, const int * const input,
const int sizeX, const int sizeY,
RDWT53Column<CHECKED> & column,
const int firstY) {
// coordinates of the first coefficient to be loaded
const int firstX = blockIdx.x * WIN_SIZE_X + columnX;
// offset of the column with index 'colIndex' in the transform buffer
column.offset = buffer.getColumnOffset(columnX);
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 3 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 0 * STRIDE] =
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 1);
buffer[column.offset + 0 * STRIDE] = column.loader.loadHighFrom(input);
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
}
// Now, the next coefficient, which will be loaded by loader, is #2.
}
/// Actual GPU 5/3 RDWT implementation.
/// @tparam CHECKED_LOADS true if boundaries must be checked when reading
/// @tparam CHECKED_WRITES true if boundaries must be checked when writing
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template<bool CHECKED_LOADS, bool CHECKED_WRITES>
__device__ void transform(const int * const in, int * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about one main and one boundary column
RDWT53Column<CHECKED_LOADS> column, boundaryColumn;
// index of first row to be transformed
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
// some threads initialize boundary columns
boundaryColumn.clear();
if(threadIdx.x < 3) {
// First 3 threads also handle boundary columns. Thread #0 gets right
// column #0, thread #1 get right column #1 and thread #2 left column.
const int colId = threadIdx.x + ((threadIdx.x != 2) ? WIN_SIZE_X : -3);
// Thread initializes offset of the boundary column (in shared
// buffer), first 3 pixels of the column and a loader for this column.
initColumn(colId, in, sizeX, sizeY, boundaryColumn, firstY);
}
// All threads initialize central columns.
initColumn(parityIdx<WIN_SIZE_X>(), in, sizeX, sizeY, column, firstY);
// horizontally transform first 3 rows
horizontalTransform(3, 0);
// writer of output pixels - initialize it
const int outX = blockIdx.x * WIN_SIZE_X + threadIdx.x;
VerticalDWTPixelWriter<int, CHECKED_WRITES> writer;
writer.init(sizeX, sizeY, outX, firstY);
// offset of column (in transform buffer) saved by this thread
const int outputColumnOffset = buffer.getColumnOffset(threadIdx.x);
// (Each iteration assumes that first 3 rows of transform buffer are
// already loaded with horizontally transformed pixels.)
for(int w = 0; w < winSteps; w++) {
// Load another WIN_SIZE_Y lines of this thread's column
// into the transform buffer.
loadWindowIntoColumn(in, column);
// possibly load boundary columns
if(threadIdx.x < 3) {
loadWindowIntoColumn(in, boundaryColumn);
}
// horizontally transform all newly loaded lines
horizontalTransform(WIN_SIZE_Y, 3);
// Using 3 registers, remember current values of last 3 rows
// of transform buffer. These rows are transformed horizontally
// only and will be used in next iteration.
int last3Lines[3];
last3Lines[0] = buffer[outputColumnOffset + (WIN_SIZE_Y + 0) * STRIDE];
last3Lines[1] = buffer[outputColumnOffset + (WIN_SIZE_Y + 1) * STRIDE];
last3Lines[2] = buffer[outputColumnOffset + (WIN_SIZE_Y + 2) * STRIDE];
// vertically transform all central columns
buffer.forEachVerticalOdd(outputColumnOffset, Reverse53Update());
buffer.forEachVerticalEven(outputColumnOffset, Reverse53Predict());
// Save all results of current window. Results are in transform buffer
// at rows from #1 to #(1 + WIN_SIZE_Y). Other rows are invalid now.
// (They only served as a boundary for vertical RDWT.)
for(int i = 1; i < (1 + WIN_SIZE_Y); i++) {
writer.writeInto(out, buffer[outputColumnOffset + i * STRIDE]);
}
// Use last 3 remembered lines as first 3 lines for next iteration.
// As expected, these lines are already horizontally transformed.
buffer[outputColumnOffset + 0 * STRIDE] = last3Lines[0];
buffer[outputColumnOffset + 1 * STRIDE] = last3Lines[1];
buffer[outputColumnOffset + 2 * STRIDE] = last3Lines[2];
// Wait for all writing threads before proceeding to loading new
// coefficients in next iteration. (Not to overwrite those which
// are not written yet.)
__syncthreads();
}
}
public:
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
__device__ static void run(const int * const input, int * const output,
const int sx, const int sy, const int steps) {
// prepare instance with buffer in shared memory
__shared__ RDWT53<WIN_SIZE_X, WIN_SIZE_Y> rdwt53;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (1 in next expressions is for radius of impulse response of 5/3 RDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary) {
// near bottom boundary => check both writing and reading
rdwt53.transform<true, true>(input, output, sx, sy, steps);
} else if(atRightBoudary) {
// near right boundary only => check writing only
rdwt53.transform<false, true>(input, output, sx, sy, steps);
} else {
// no nearby boundary => check nothing
rdwt53.transform<false, false>(input, output, sx, sy, steps);
}
}
}; // end of class RDWT53
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(RDWT53<WIN_SX, WIN_SY>), 8))
__global__ void rdwt53Kernel(const int * const in, int * const out,
const int sx, const int sy, const int steps) {
RDWT53<WIN_SX, WIN_SY>::run(in, out, sx, sy, steps);
}
/// Only computes optimal number of sliding window steps,
/// number of threadblocks and then launches the 5/3 RDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchRDWT53Kernel (int * in, int * out, const int sx, const int sy) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// finally transform this level
PERF_BEGIN
rdwt53Kernel<WIN_SX, WIN_SY><<<gSize, WIN_SX>>>(in, out, sx, sy, steps);
PERF_END(" RDWT53", sx, sy)
CudaDWTTester::checkLastKernelCall("RDWT 5/3 kernel");
}
/// Reverse 5/3 2D DWT. See common rules (above) for more details.
/// @param in Input DWT coefficients. Format described in common rules.
/// Will not be preserved (will be overwritten).
/// @param out output buffer on GPU - will contain original image
/// in normalized range [-128, 127].
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
void rdwt53(int * in, int * out, int sizeX, int sizeY, int levels) {
if(levels > 1) {
// let this function recursively reverse transform deeper levels first
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
rdwt53(in, out, llSizeX, llSizeY, levels - 1);
// copy reverse transformed LL band from output back into the input
memCopy(in, out, llSizeX, llSizeY);
}
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchRDWT53Kernel<192, 8>(in, out, sizeX, sizeY);
} else if (sizeX >= 480) {
launchRDWT53Kernel<128, 8>(in, out, sizeX, sizeY);
} else {
launchRDWT53Kernel<64, 8>(in, out, sizeX, sizeY);
}
}
} // end of namespace dwt_cuda
|
029e7739079140ca66ae9203818d634c15817643.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: mphoward / Everyone is free to add additional potentials
/*!
* \file AnisoPairPotentials.cu
* \brief Defines the driver functions for computing pair forces on the GPU
*
* Each pair potential evaluator needs to have an explicit instantiation of the
* compute_aniso_pair_potential.
*/
#include "AnisoPairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for Two-patch Morse anisotropic pair potential
template hipError_t compute_aniso_pair_potential<azplugins::detail::AnisoPairEvaluatorTwoPatchMorse>
(const a_pair_args_t& pair_args,
const typename azplugins::detail::AnisoPairEvaluatorTwoPatchMorse::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
| 029e7739079140ca66ae9203818d634c15817643.cu | // Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: mphoward / Everyone is free to add additional potentials
/*!
* \file AnisoPairPotentials.cu
* \brief Defines the driver functions for computing pair forces on the GPU
*
* Each pair potential evaluator needs to have an explicit instantiation of the
* compute_aniso_pair_potential.
*/
#include "AnisoPairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for Two-patch Morse anisotropic pair potential
template cudaError_t compute_aniso_pair_potential<azplugins::detail::AnisoPairEvaluatorTwoPatchMorse>
(const a_pair_args_t& pair_args,
const typename azplugins::detail::AnisoPairEvaluatorTwoPatchMorse::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
|
217466812b0970ed1170985583df9a796e425a9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
using namespace cv;
using namespace std;
Mat img;
Mat new_img;
int radio, NUM_THREADS;
// CUDA API error checking macro
static void handleError( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define cudaCheck( err ) (handleError( err, __FILE__, __LINE__ ))
//Function executed by the threads to generate the blurring effect
__global__ void blur(int *r_in, int *r_out,int *g_in, int *g_out,int *b_in, int *b_out, int radio, int numthreads, int numblocks, int largo) {
int gindex = threadIdx.x + (blockIdx.x * blockDim.x);
int aux = largo/(numthreads*numblocks)+1;
for (int i=gindex*aux; i<(gindex+1)*(aux); i++){
int aux_r = 0, aux_g = 0, aux_b = 0, count=0;
for (int a=0; a<=radio; a++){
if(!(i-a<0)){
aux_r+= r_in[i-a];
aux_g+= g_in[i-a];
aux_b+= b_in[i-a];
count++;
}
if(i+a<largo){
aux_r+= r_in[i+a];
aux_g+= g_in[i+a];
aux_b+= b_in[i+a];
count++;
}
}
//average of the summed pixels
aux_r = int(aux_r/(count));
aux_g = int(aux_g/(count));
aux_b = int(aux_b/(count));
//store the new pixel value to be sent back to the host
r_out[i] = aux_r;
g_out[i] = aux_g;
b_out[i] = aux_b;
}
}
//Main function
int main( int argc, char** argv )
{
NUM_THREADS = atoi(argv[3]);
int num_blocks = atoi(argv[4]);
int j, k;
//Open the images and keep them in memory
img = imread(argv[1], CV_LOAD_IMAGE_UNCHANGED);
new_img = imread(argv[1], CV_LOAD_IMAGE_UNCHANGED);
if (img.empty()){
cout << "Error : Image cannot be loaded..!!" << endl;
return -1;
}
int h_r_in[img.cols],h_r_out[img.cols],h_g_in[img.cols],h_g_out[img.cols],h_b_in[img.cols],h_b_out[img.cols];
//device variables
int *d_r_in, *d_r_out,*d_g_in, *d_g_out,*d_b_in, *d_b_out;
radio=atoi(argv[2]);
int largo=img.cols;
//Allocate resources on the device
hipMalloc( (void **) &d_r_in, img.cols * sizeof(int));
hipMalloc( (void **) &d_r_out, img.cols * sizeof(int));
hipMalloc( (void **) &d_g_in, img.cols * sizeof(int));
hipMalloc( (void **) &d_g_out, img.cols * sizeof(int));
hipMalloc( (void **) &d_b_in, img.cols * sizeof(int));
hipMalloc( (void **) &d_b_out, img.cols * sizeof(int));
//j walks the image row by row
for(j=0;j<img.rows;j++){
//assign the values of the current row on the host
for(k=0;k<img.cols;k++){
h_r_in[k] = int(img.at<Vec3b>(j,k)[0]);
h_g_in[k] = int(img.at<Vec3b>(j,k)[1]);
h_b_in[k] = int(img.at<Vec3b>(j,k)[2]);
}
//send the values of the current row from the host to the device
cudaCheck( hipMemcpy( d_r_in, h_r_in, img.cols * sizeof(int), hipMemcpyHostToDevice));
cudaCheck( hipMemcpy( d_g_in, h_g_in, img.cols * sizeof(int), hipMemcpyHostToDevice));
cudaCheck( hipMemcpy( d_b_in, h_b_in, img.cols * sizeof(int), hipMemcpyHostToDevice));
//run the stencil
hipLaunchKernelGGL(( blur), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, d_r_in,d_r_out,d_g_in,d_g_out,d_b_in,d_b_out, radio, NUM_THREADS, num_blocks, largo);
//store on the host the values produced by the stencil
hipMemcpy( h_r_out, d_r_out, img.cols * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy( h_g_out, d_g_out, img.cols * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy( h_b_out, d_b_out, img.cols * sizeof(int), hipMemcpyDeviceToHost);
//walk the current row and assign it the new RGB values
for(k=0;k<img.cols;k++){
new_img.at<Vec3b>(j,k)[0] = h_r_out[k];
new_img.at<Vec3b>(j,k)[1] = h_g_out[k];
new_img.at<Vec3b>(j,k)[2] = h_b_out[k];
}
}
string name = "modificada_";
name.append("kernel_");
name.append(argv[2]);
name.append("_");
name.append(argv[1]);
//Save the image
imwrite(name, new_img);
return 0;
}
| 217466812b0970ed1170985583df9a796e425a9e.cu | #include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <stdio.h>
#include <cuda.h>
using namespace cv;
using namespace std;
Mat img;
Mat new_img;
int radio, NUM_THREADS;
// CUDA API error checking macro
static void handleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define cudaCheck( err ) (handleError( err, __FILE__, __LINE__ ))
//Function executed by the threads to generate the blurring effect
__global__ void blur(int *r_in, int *r_out,int *g_in, int *g_out,int *b_in, int *b_out, int radio, int numthreads, int numblocks, int largo) {
int gindex = threadIdx.x + (blockIdx.x * blockDim.x);
int aux = largo/(numthreads*numblocks)+1;
for (int i=gindex*aux; i<(gindex+1)*(aux); i++){
int aux_r = 0, aux_g = 0, aux_b = 0, count=0;
for (int a=0; a<=radio; a++){
if(!(i-a<0)){
aux_r+= r_in[i-a];
aux_g+= g_in[i-a];
aux_b+= b_in[i-a];
count++;
}
if(i+a<largo){
aux_r+= r_in[i+a];
aux_g+= g_in[i+a];
aux_b+= b_in[i+a];
count++;
}
}
//average of the summed pixels
aux_r = int(aux_r/(count));
aux_g = int(aux_g/(count));
aux_b = int(aux_b/(count));
//store the new pixel value to be sent back to the host
r_out[i] = aux_r;
g_out[i] = aux_g;
b_out[i] = aux_b;
}
}
//Main function
int main( int argc, char** argv )
{
NUM_THREADS = atoi(argv[3]);
int num_blocks = atoi(argv[4]);
int j, k;
//Open the images and keep them in memory
img = imread(argv[1], CV_LOAD_IMAGE_UNCHANGED);
new_img = imread(argv[1], CV_LOAD_IMAGE_UNCHANGED);
if (img.empty()){
cout << "Error : Image cannot be loaded..!!" << endl;
return -1;
}
int h_r_in[img.cols],h_r_out[img.cols],h_g_in[img.cols],h_g_out[img.cols],h_b_in[img.cols],h_b_out[img.cols];
//device variables
int *d_r_in, *d_r_out,*d_g_in, *d_g_out,*d_b_in, *d_b_out;
radio=atoi(argv[2]);
int largo=img.cols;
//Allocate resources on the device
cudaMalloc( (void **) &d_r_in, img.cols * sizeof(int));
cudaMalloc( (void **) &d_r_out, img.cols * sizeof(int));
cudaMalloc( (void **) &d_g_in, img.cols * sizeof(int));
cudaMalloc( (void **) &d_g_out, img.cols * sizeof(int));
cudaMalloc( (void **) &d_b_in, img.cols * sizeof(int));
cudaMalloc( (void **) &d_b_out, img.cols * sizeof(int));
//j walks the image row by row
for(j=0;j<img.rows;j++){
//assign the values of the current row on the host
for(k=0;k<img.cols;k++){
h_r_in[k] = int(img.at<Vec3b>(j,k)[0]);
h_g_in[k] = int(img.at<Vec3b>(j,k)[1]);
h_b_in[k] = int(img.at<Vec3b>(j,k)[2]);
}
//send the values of the current row from the host to the device
cudaCheck( cudaMemcpy( d_r_in, h_r_in, img.cols * sizeof(int), cudaMemcpyHostToDevice));
cudaCheck( cudaMemcpy( d_g_in, h_g_in, img.cols * sizeof(int), cudaMemcpyHostToDevice));
cudaCheck( cudaMemcpy( d_b_in, h_b_in, img.cols * sizeof(int), cudaMemcpyHostToDevice));
//run the stencil
blur<<<num_blocks,NUM_THREADS>>> (d_r_in,d_r_out,d_g_in,d_g_out,d_b_in,d_b_out, radio, NUM_THREADS, num_blocks, largo);
//store on the host the values produced by the stencil
cudaMemcpy( h_r_out, d_r_out, img.cols * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy( h_g_out, d_g_out, img.cols * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy( h_b_out, d_b_out, img.cols * sizeof(int), cudaMemcpyDeviceToHost);
//walk the current row and assign it the new RGB values
for(k=0;k<img.cols;k++){
new_img.at<Vec3b>(j,k)[0] = h_r_out[k];
new_img.at<Vec3b>(j,k)[1] = h_g_out[k];
new_img.at<Vec3b>(j,k)[2] = h_b_out[k];
}
}
string name = "modificada_";
name.append("kernel_");
name.append(argv[2]);
name.append("_");
name.append(argv[1]);
//Save the image
imwrite(name, new_img);
return 0;
}
|
9595148f4a0b697c48caf7f02b580f509a0c32d6.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "stdio.h"
#include "defines.h"
__constant__ float pi = 3.14159265359f;
__constant__ float pi2 = 3.14159265359f/2;
__constant__ float pi4 = 3.14159265359f / 4;
__constant__ float pi34 = 3* 3.14159265359f / 4;
__constant__ float gaussianMatrix[49] = { 0.00000067, 0.00002292, 0.00019117, 0.00038771, 0.00019117, 0.00002292, 0.00000067,
0.00002292, 0.00078634, 0.00655965, 0.01330373, 0.00655965, 0.00078633, 0.00002292,
0.00019117, 0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965, 0.00019117,
0.00038771, 0.01330373, 0.11098164, 0.22508352, 0.11098164, 0.01330373, 0.00038771,
0.00019117, 0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965, 0.00019117,
0.00002292, 0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633, 0.00002292,
0.00000067, 0.00002292, 0.00019117, 0.00038771, 0.00019117, 0.00002292, 0.00000067 };
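//Scatter-based 3x3 convolution: each thread reads its input pixel and adds it, weighted by the matrix coefficients, into the 3x3 neighborhood of the output image; atomicAdd resolves the overlapping writes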
__global__ void kernelApply3x3MatrixOnImage(int* input, int* output, float* mat, unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float s_mat[9];
if (x >= width || y >= height)
return;
if (threadIdx.x == 0 && threadIdx.y == 0){
s_mat[0] = mat[0];
s_mat[1] = mat[1];
s_mat[2] = mat[2];
s_mat[3] = mat[3];
s_mat[4] = mat[4];
s_mat[5] = mat[5];
s_mat[6] = mat[6];
s_mat[7] = mat[7];
s_mat[8] = mat[8];
}
#ifdef DEBUG_OUTPUT
if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0){
printf("Applied Matrix:\n");
printf("%g, ", mat[0]);
printf("%g, ", mat[1]);
printf("%g\n", mat[2]);
printf("%g, ", mat[3]);
printf("%g, ", mat[4]);
printf("%g\n", mat[5]);
printf("%g, ", mat[6]);
printf("%g, ", mat[7]);
printf("%g\n", mat[8]);
}
#endif
unsigned int pos = x + y*width;
int val = input[pos];
__syncthreads();
//offsets give the neighbor position relative to this pixel: each thread writes its value, multiplied by the corresponding matrix factor, into the neighboring output pixels
if (y > 0)
{
//-1 -1
if (x > 0)
atomicAdd(&output[pos - width - 1], val * s_mat[8]);
//0 -1
atomicAdd(&output[pos - width], val * s_mat[7]);
//1 -1
if (x < width - 1)
atomicAdd(&output[pos - width + 1], val * s_mat[6]);
}
//-1 0
if (x > 0)
atomicAdd(&output[pos - 1], val * s_mat[5]);
//0 0
atomicAdd(&output[pos], val * s_mat[4]);
//1 0
if (x < width - 1)
atomicAdd(&output[pos + 1], val * s_mat[3]);
if (y < height - 1)
{
//-1 1
if (x > 0)
atomicAdd(&output[pos + width - 1], val * s_mat[2]);
//0 1
atomicAdd(&output[pos + width], val * s_mat[1]);
//1 1
if (x < width - 1)
atomicAdd(&output[pos + width + 1], val * s_mat[0]);
}
}
//rounds the input value to the closest of 0, pi/4, pi/2 and 3*pi/4; expects an input value in [0,pi]
__device__ void kernelRoundToClosestAngle(float input,float* res) {
float val[4] = { 0, pi / 4, pi / 2, 3 * pi / 4 };
float diff = input; //initial difference: distance to the angle 0
float closest = 0; //initial return value as closest is 0
for (unsigned int i = 1; i < 4; ++i)
{
if (abs(input - val[i]) < diff)
{
closest = val[i];
diff = abs(input - val[i]);
}
}
res[0] = closest;
}
//blurs an input image of dimensions w*h with a 7x7 Gaussian kernel and writes the result to output; https://en.wikipedia.org/wiki/Gaussian_blur
__global__ void kernelGaussianBlur(float* input, float* output, unsigned int w, unsigned int h)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ size_t maxpos;
if (x >= w || y >= h)
return;
float result = 0;
int pos = 0;
if (threadIdx.x == 0 && threadIdx.y == 0){
maxpos = w*h;
}
/*if (x == 0 && y == 0){
printf("maxpos: %d\n", maxpos);
printf("w:%d,h:%d\n", w, h);
}*/
__syncthreads();
for (int j = -3; j < 4; ++j)//y-coord
{
for (int i = -3; i < 4; ++i){//x-coord
pos = (x + i) + (y + j)*w;
/*if (x == 100 && y == 100){
printf("i:%d,j:%d\n", i, j);
printf("pos: %d\n", pos);
}*/
if (pos < 0 || pos >= maxpos || x+i < 0 || x+i >= w || y+j <0 || y+j >=h){ //edge of the image
/*if (x == 100 && y == 100){
printf("continue\n");
}*/
continue;
}
/*if (x == 100 && y == 100)
printf("old res: %f\n", result);*/
result += gaussianMatrix[(i+3)+(j+3)*7]*input[pos];
/*if (x == 100 && y == 100)
{
printf("MatrixPos:%d\n", (i + 3) + (j + 3) * 7);
printf("%f * %f = %f\n", gaussianMatrix[(i + 3) + (j + 3) * 7], input[pos], input[pos] * gaussianMatrix[(i + 3) + (j + 3) * 7]);
printf("new res: %f\n", result);
}*/
}
}
/*if (x == 100 && y ==100 )
printf("result: %f\n", result);*/
output[x + y*w] = result;
/*if (x == 100 && y ==100)
printf("output: %f\n", output[x + y*w]);*/
}
__global__ void kernelAtan2FixedAngles(int* input_x, int* input_y, float* output, unsigned int width, unsigned int height) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
float angle = atan2((float)input_x[x + y*width], (float)input_y[x + y*width]);
kernelRoundToClosestAngle(angle, &(output[x + y*width]));
}
/*
__global__ void kernelRoundToClosestAngle(float input, float* res){
kernelRoundToClosestAngle(input, res);
}*/
/*
checks whether the value at (x,y) is above the threshold. if it is, every point at distance r from (x,y) gets a vote, since each of them could be the midpoint of a circle of radius r passing through (x,y); this is repeated for different radii.
all points in the result array with a high enough score are considered circle midpoints
*/
__global__ void findCirclesWithRadius(float* input, unsigned int* midpointSum, float above_threshold, unsigned int r, unsigned int w, unsigned int h){
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h)
return;
if (input[x + y*w] < above_threshold){ //return if value is < threshold -> wont participate in any circle
return;
}
//extern __shared__ unsigned int midpointSum[];//shared result array
//for (unsigned int r = min_radius; r < max_radius; ++r)
float r2 = r*r;
for (int xoff = 0; xoff <= r; ++xoff) //go from 0 to r; a ist the x-offset-coord, we calculate the y offset coord with sqrt(r*r-a*a)
{
float yoff = sqrt(r2 - xoff*xoff);
if (x == 20 && y == 20)
{
}
if ((int)x + xoff < w && (int)x + xoff >= 0 && (int)y + yoff < h && (int)y + yoff >= 0){
//printf("1\n");
//printf("1:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x + xoff + (y + (int)yoff)*w], 1);
}
if ((int)x - xoff < w && (int)x - xoff >= 0 && (int)y + yoff < h && (int)y + yoff >= 0){
//printf("2\n");
//printf("2:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x - xoff + (y + (int)yoff)*w], 1);
}
if ((int)x + xoff < w && (int)x + xoff >= 0 && (int)y - yoff < h && (int)y - yoff >= 0){
//printf("3\n");
//printf("3:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x + xoff + (y - (int)yoff)*w], 1);
}
if ((int)x - xoff < w && (int)x - xoff >= 0 && (int)y - yoff < h && (int)y - yoff >= 0){
//printf("4\n");
//printf("4:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x - xoff + (y - (int)yoff)*w], 1);
}
}
/*
if (midpointSum[x + y*w]>0)
{
rating[x + y*w] = 255;
printf("(%d|%d)->midpointSum: %d rating:%f \n", x,y, midpointSum[x + y*w], rating[x + y*w]);
}*/
//if (midpointSum[x + y*w]>(r + 1)) //if a point is the midpoint for at least a half circle it should be considered as midpoint
//rating[x + y*w] = midpointSum[x + y*w]*;
//else
//rating[x + y*w] = 0;
}
__global__ void circleMidpointAnalysis(float* rating, unsigned int* midpointSum,unsigned int used_radius, unsigned int w, unsigned int h){
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h)
return;
//if (midpointSum[x + y*w]>0)
{
// rating[x + y*w] = 255;
}
if (midpointSum[x + y*w] > (1.7 * used_radius + 1)) //if a point is the midpoint for at least a half circle it should be considered as midpoint
rating[x + y*w] = midpointSum[x + y*w];
else
rating[x + y*w] = 0;
}
//adds image2 into image1 and zeroes every value of image1 below the threshold
__global__ void kernelCombineImagesAndRemoveBelowThreshold(float* image1, float* image2, float threshold, unsigned int w, unsigned int h) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h)
return;
image1[x + y*w] += image2[x + y*w];
if (image1[x + y*w] < threshold)
image1[x + y*w] = 0;
}
| 9595148f4a0b697c48caf7f02b580f509a0c32d6.cu | #pragma once
#include "cuda.h"
#include "device_functions.h"
#include "stdio.h"
#include "defines.h"
__constant__ float pi = 3.14159265359f;
__constant__ float pi2 = 3.14159265359f/2;
__constant__ float pi4 = 3.14159265359f / 4;
__constant__ float pi34 = 3* 3.14159265359f / 4;
__constant__ float gaussianMatrix[49] = { 0.00000067, 0.00002292, 0.00019117, 0.00038771, 0.00019117, 0.00002292, 0.00000067,
0.00002292, 0.00078634, 0.00655965, 0.01330373, 0.00655965, 0.00078633, 0.00002292,
0.00019117, 0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965, 0.00019117,
0.00038771, 0.01330373, 0.11098164, 0.22508352, 0.11098164, 0.01330373, 0.00038771,
0.00019117, 0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965, 0.00019117,
0.00002292, 0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633, 0.00002292,
0.00000067, 0.00002292, 0.00019117, 0.00038771, 0.00019117, 0.00002292, 0.00000067 };
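//Scatter-based 3x3 convolution: each thread reads its input pixel and adds it, weighted by the matrix coefficients, into the 3x3 neighborhood of the output image; atomicAdd resolves the overlapping writes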
__global__ void kernelApply3x3MatrixOnImage(int* input, int* output, float* mat, unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float s_mat[9];
if (threadIdx.x == 0 && threadIdx.y == 0){
s_mat[0] = mat[0];
s_mat[1] = mat[1];
s_mat[2] = mat[2];
s_mat[3] = mat[3];
s_mat[4] = mat[4];
s_mat[5] = mat[5];
s_mat[6] = mat[6];
s_mat[7] = mat[7];
s_mat[8] = mat[8];
}
#ifdef DEBUG_OUTPUT
if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0){
printf("Applied Matrix:\n");
printf("%g, ", mat[0]);
printf("%g, ", mat[1]);
printf("%g\n", mat[2]);
printf("%g, ", mat[3]);
printf("%g, ", mat[4]);
printf("%g\n", mat[5]);
printf("%g, ", mat[6]);
printf("%g, ", mat[7]);
printf("%g\n", mat[8]);
}
#endif
	__syncthreads();
	if (x >= width || y >= height)
		return;
	unsigned int pos = x + y*width;
	int val = input[pos];
	//the numbered comments below give the neighbour offset relative to this thread's pixel; each thread scatters its own value, weighted by the matrix entry for that relative position
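	//for example, the thread at (x, y) adds val * s_mat[8] to output pixel
	//(x - 1, y - 1), val * s_mat[4] to (x, y) itself and val * s_mat[0] to
	//(x + 1, y + 1); seen from one output pixel this is the usual 3x3
	//weighted sum of its neighbourhood, made safe for concurrent writers by atomicAdd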
if (y > 0)
{
//-1 -1
if (x > 0)
atomicAdd(&output[pos - width - 1], val * s_mat[8]);
//0 -1
atomicAdd(&output[pos - width], val * s_mat[7]);
//1 -1
if (x < width - 1)
atomicAdd(&output[pos - width + 1], val * s_mat[6]);
}
//-1 0
if (x > 0)
atomicAdd(&output[pos - 1], val * s_mat[5]);
//0 0
atomicAdd(&output[pos], val * s_mat[4]);
//1 0
if (x < width - 1)
atomicAdd(&output[pos + 1], val * s_mat[3]);
if (y < height - 1)
{
//-1 1
if (x > 0)
atomicAdd(&output[pos + width - 1], val * s_mat[2]);
//0 1
atomicAdd(&output[pos + width], val * s_mat[1]);
//1 1
if (x < width - 1)
atomicAdd(&output[pos + width + 1], val * s_mat[0]);
}
}
//rounds the input to the closest of 0, pi/4, pi/2 and 3pi/4; expects an input value in [0, pi]
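//e.g. an input of 1.0 rad is rounded to pi/4 and an input of 1.4 rad to pi/2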
__device__ void kernelRoundToClosestAngle(float input,float* res) {
float val[4] = { 0, pi / 4, pi / 2, 3 * pi / 4 };
	float diff = input; //distance to the first candidate angle, 0
float closest = 0; //initial return value as closest is 0
for (unsigned int i = 1; i < 4; ++i)
{
if (abs(input - val[i]) < diff)
{
closest = val[i];
diff = abs(input - val[i]);
}
}
res[0] = closest;
}
//blurs an image input of dim w*h with gaussian blur and returns in output https://en.wikipedia.org/wiki/Gaussian_blur
__global__ void kernelGaussianBlur(float* input, float* output, unsigned int w, unsigned int h)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
	//every thread can compute the image size itself, so no shared memory or
	//block-wide synchronisation is needed before the early return below
	const size_t maxpos = (size_t)w * h;
	if (x >= w || y >= h)
		return;
	float result = 0;
	int pos = 0;
	/*if (x == 0 && y == 0){
		printf("maxpos: %d\n", maxpos);
		printf("w:%d,h:%d\n", w, h);
	}*/
for (int j = -3; j < 4; ++j)//y-coord
{
for (int i = -3; i < 4; ++i){//x-coord
pos = (x + i) + (y + j)*w;
/*if (x == 100 && y == 100){
printf("i:%d,j:%d\n", i, j);
printf("pos: %d\n", pos);
}*/
if (pos < 0 || pos >= maxpos || x+i < 0 || x+i >= w || y+j <0 || y+j >=h){ //edge of the image
/*if (x == 100 && y == 100){
printf("continue\n");
}*/
continue;
}
/*if (x == 100 && y == 100)
printf("old res: %f\n", result);*/
result += gaussianMatrix[(i+3)+(j+3)*7]*input[pos];
/*if (x == 100 && y == 100)
{
printf("MatrixPos:%d\n", (i + 3) + (j + 3) * 7);
printf("%f * %f = %f\n", gaussianMatrix[(i + 3) + (j + 3) * 7], input[pos], input[pos] * gaussianMatrix[(i + 3) + (j + 3) * 7]);
printf("new res: %f\n", result);
}*/
}
}
/*if (x == 100 && y ==100 )
printf("result: %f\n", result);*/
output[x + y*w] = result;
/*if (x == 100 && y ==100)
printf("output: %f\n", output[x + y*w]);*/
}
__global__ void kernelAtan2FixedAngles(int* input_x, int* input_y, float* output, unsigned int width, unsigned int height) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
float angle = atan2((float)input_x[x + y*width], (float)input_y[x + y*width]);
kernelRoundToClosestAngle(angle, &(output[x + y*width]));
}
/*
__global__ void kernelRoundToClosestAngle(float input, float* res){
kernelRoundToClosestAngle(input, res);
}*/
/*
Hough-style circle voting: if the value at (x, y) is above the threshold, every
pixel that could be the centre of a circle of radius r passing through (x, y)
receives one vote in midpointSum. Repeating this for several radii and keeping
only pixels with a high enough score yields the candidate circle midpoints
(see circleMidpointAnalysis below and the host-side sketch at the end of this file).
*/
__global__ void findCirclesWithRadius(float* input, unsigned int* midpointSum, float above_threshold, unsigned int r, unsigned int w, unsigned int h){
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h)
return;
	if (input[x + y*w] < above_threshold){ //skip pixels below the threshold; they won't vote for any circle
return;
}
//extern __shared__ unsigned int midpointSum[];//shared result array
//for (unsigned int r = min_radius; r < max_radius; ++r)
float r2 = r*r;
	for (int xoff = 0; xoff <= r; ++xoff) //xoff runs from 0 to r; the matching y offset is sqrt(r*r - xoff*xoff)
{
float yoff = sqrt(r2 - xoff*xoff);
if ((int)x + xoff < w && (int)x + xoff >= 0 && (int)y + yoff < h && (int)y + yoff >= 0){
//printf("1\n");
//printf("1:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x + xoff + (y + (int)yoff)*w], 1);
}
if ((int)x - xoff < w && (int)x - xoff >= 0 && (int)y + yoff < h && (int)y + yoff >= 0){
//printf("2\n");
//printf("2:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x - xoff + (y + (int)yoff)*w], 1);
}
if ((int)x + xoff < w && (int)x + xoff >= 0 && (int)y - yoff < h && (int)y - yoff >= 0){
//printf("3\n");
//printf("3:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x + xoff + (y - (int)yoff)*w], 1);
}
if ((int)x - xoff < w && (int)x - xoff >= 0 && (int)y - yoff < h && (int)y - yoff >= 0){
//printf("4\n");
//printf("4:x:%d; y:%d; xoff:%d; yoff:%d; r:%d adding to %d\n ", x, y, xoff, (int)yoff, r, x + xoff + (y + (int)yoff)*w);
atomicAdd(&midpointSum[x - xoff + (y - (int)yoff)*w], 1);
}
}
/*
if (midpointSum[x + y*w]>0)
{
rating[x + y*w] = 255;
printf("(%d|%d)->midpointSum: %d rating:%f \n", x,y, midpointSum[x + y*w], rating[x + y*w]);
}*/
//if (midpointSum[x + y*w]>(r + 1)) //if a point is the midpoint for at least a half circle it should be considered as midpoint
//rating[x + y*w] = midpointSum[x + y*w]*;
//else
//rating[x + y*w] = 0;
}
__global__ void circleMidpointAnalysis(float* rating, unsigned int* midpointSum,unsigned int used_radius, unsigned int w, unsigned int h){
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h)
return;
//if (midpointSum[x + y*w]>0)
{
// rating[x + y*w] = 255;
}
	if (midpointSum[x + y*w] > (1.7 * used_radius + 1)) //heuristic vote threshold: keep (x, y) as a circle midpoint only if it collected clearly more than ~1.7 * r votes
rating[x + y*w] = midpointSum[x + y*w];
else
rating[x + y*w] = 0;
}
//adds image2 onto image1 and zeroes every pixel of image1 that ends up below the threshold
__global__ void kernelCombineImagesAndRemoveBelowThreshold(float* image1, float* image2, float threshold, unsigned int w, unsigned int h) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h)
return;
image1[x + y*w] += image2[x + y*w];
if (image1[x + y*w] < threshold)
image1[x + y*w] = 0;
}
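// Hedged host-side usage sketch: one way the kernels above could be chained
// into a simple circle detector (vote per radius, threshold the votes, merge
// the per-radius ratings). The function name, the edge threshold of 128, the
// 16x16 launch configuration and the omission of error checking are
// assumptions made for this sketch only.
inline void detectCirclesSketch(float* d_edges, float* d_rating,
	unsigned int w, unsigned int h,
	unsigned int min_radius, unsigned int max_radius)
{
	dim3 block(16, 16);
	dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
	unsigned int* d_votes = 0;
	float* d_radius_rating = 0;
	cudaMalloc((void**)&d_votes, (size_t)w * h * sizeof(unsigned int));
	cudaMalloc((void**)&d_radius_rating, (size_t)w * h * sizeof(float));
	cudaMemset(d_rating, 0, (size_t)w * h * sizeof(float));
	for (unsigned int r = min_radius; r <= max_radius; ++r)
	{
		cudaMemset(d_votes, 0, (size_t)w * h * sizeof(unsigned int));
		//every sufficiently strong pixel votes for all possible centres of a circle of radius r through it
		findCirclesWithRadius<<<grid, block>>>(d_edges, d_votes, 128.0f, r, w, h);
		//keep only centres that collected enough votes for this radius
		circleMidpointAnalysis<<<grid, block>>>(d_radius_rating, d_votes, r, w, h);
		//accumulate the per-radius ratings into the overall rating image
		kernelCombineImagesAndRemoveBelowThreshold<<<grid, block>>>(d_rating, d_radius_rating, 0.0f, w, h);
	}
	cudaFree(d_votes);
	cudaFree(d_radius_rating);
}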
|
8786db8e2f387e50793216ccb8573f599abf2778.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <array>
#include <limits>
#include <sstream>
#include <tuple>
#include <type_traits>
#include "open3d/core/Blob.h"
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Device.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/FunctionTraits.h"
#include "open3d/core/Indexer.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/ParallelFor.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/Reduction.h"
#include "open3d/utility/Logging.h"
// CUDA reduction is based on PyTorch's CUDA reduction implementation.
// See: aten/src/ATen/native/cuda/Reduce.cuh
#if __CUDA_ARCH__ >= 750
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024;
#else
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048;
#endif
constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024;
constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
#define OPEN3D_MAX_THREADS_PER_BLOCK(val) \
(((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \
: CUDA_THREADS_PER_BLOCK_FALLBACK)
#define OPEN3D_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \
((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \
? (blocks_per_sm) \
: ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \
(threads_per_block))))
#define OPEN3D_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \
__launch_bounds__((OPEN3D_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \
(OPEN3D_MIN_BLOCKS_PER_SM((max_threads_per_block), \
(min_blocks_per_sm))))
template <typename T>
OPEN3D_DEVICE __forceinline__ T WARP_SHFL_DOWN(T value,
unsigned int delta,
int width = warpSize,
unsigned int mask = 0xffffffff) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_down_sync(mask, value, delta, width);
#else
return __shfl_down(value, delta, width);
#endif
}
namespace open3d {
namespace core {
namespace kernel {
static inline int64_t DivUp(int64_t a, int64_t b) { return (a + b - 1) / b; }
// Returns reduced fraction numerator & denominator
OPEN3D_HOST_DEVICE static void ReduceFraction(int64_t& numerator,
int64_t& denominator) {
// Get GCD of num and denom using Euclid's algorithm.
// Can replace this with std::gcd if we ever support c++17.
int64_t a = denominator;
int64_t b = numerator;
while (b != 0) {
a %= b;
int64_t tmp = a;
a = b;
b = tmp;
}
// a is now the GCD
numerator /= a;
denominator /= a;
}
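// Example: ReduceFraction(numerator, denominator) with numerator = 6 and
// denominator = 8 computes a GCD of 2 and leaves the pair as 3 / 4.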
class ReduceConfig {
public:
static constexpr int BLOCK_X = 0;
static constexpr int BLOCK_Y = 1;
static constexpr int CTA = 2;
static constexpr int MAX_NUM_THREADS = 512;
int num_inputs_per_output_;
int num_outputs_;
int step_input_ = 1;
int step_output_ = 1;
int ctas_per_output_ = 1;
private:
int element_size_bytes_;
int input_mult_[3] = {0, 0, 0};
int output_mult_[2] = {0, 0};
int block_width_;
int block_height_;
int num_threads_;
public:
ReduceConfig(int element_size_bytes, const Indexer& indexer)
: element_size_bytes_(element_size_bytes) {
num_outputs_ = indexer.NumOutputElements();
num_inputs_per_output_ = indexer.NumWorkloads() / num_outputs_;
// Adjust block size to map block width to fastest changing dimension of
// input tensor. This grants the best possible memory accessing pattern,
// given that for non-contiguous tensor with space in between, we cannot
// have perfect memory coalescing.
bool reduction_on_fastest_striding_dimension =
(indexer.NumReductionDims() == indexer.NumDims()) ||
(indexer.GetInput(0).byte_strides_[0] <
indexer.GetInput(0).byte_strides_[indexer.NumReductionDims()]);
// Notice that dim0 & dim1 does NOT guarantee any launch configuration
// here! dim0 & dim1 are more like the upper bound of the block
// dimension. The actual launch config and reduction scheme is
// determined by setting values to `input_mult_` and
// `output_mult_`. We try to max out dim1 so that we have enough
// threads per CTA to deliver performance for larger problem size.
int64_t dim0;
int64_t dim1;
if (reduction_on_fastest_striding_dimension) {
// Map block.x to the fastest reducing dimension. It implies:
// 1. BlockXReduce is required.
// 2. block.y now max out to num_outputs.
dim0 = indexer.GetMasterShape()[0];
dim1 = num_outputs_;
} else {
// Map block.x to the fastest non reducing dimension. It implies:
// 1. BlockXReduce is turned off.
// 2. block.y now max out to num_inputs_per_output_.
dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()];
dim1 = num_inputs_per_output_;
}
// Adjust block_width and block_height
SetBlockDimension(dim0, dim1);
int block_width = block_width_;
int block_height = block_height_;
if (indexer.NumDims() == 0 || reduction_on_fastest_striding_dimension) {
// Split the input across lanes if the input is contiguous in the
// reduced dimension. This will require reduction between threads
// using warp shuffle instructions and shared memory (if
// block_width > warpSize).
input_mult_[0] = SplitInput(block_width);
} else {
// Otherwise split the output across lanes in a warp.
output_mult_[0] = SplitOutput(block_width);
}
if (ValuesPerThread() >= block_height * 16 ||
ValuesPerThread() >= 256) {
// Divide the input across warps in a thread-block, if that leaves
// at least 16 elements to be summed by each thread. This will
// require inter-warp reduction using shared memory.
input_mult_[1] = SplitInput(block_height);
} else {
// Otherwise, each warp handles a separate output.
output_mult_[1] = SplitOutput(block_height);
}
if (input_mult_[1] != 0 && ValuesPerThread() >= 256 &&
num_outputs_ <= 4096) {
// Divide the input across thread-blocks if the amount of work
// per-thread is large enough and the size of the output is small
// enough. This will require a reduction using global memory.
ctas_per_output_ = DivUp(ValuesPerThread(), 16);
if (ctas_per_output_ > 65535) {
ctas_per_output_ = 65535;
}
input_mult_[2] = SplitInput(ctas_per_output_);
}
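        // In short: each non-zero entry of input_mult_ marks a level the
        // reduced dimension is split across (block.x lanes, block.y warps,
        // ctas_per_output_ thread-blocks), while output_mult_ does the same
        // for the output dimension. Lane-level partial results are combined
        // with warp shuffles (plus shared memory when block.x spans more than
        // one warp), warp-level partials through shared memory, and
        // block-level partials through the global staging buffer guarded by
        // the semaphores.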
}
    /// Returns the largest power of two that is <= n (at least 1),
    /// e.g. LastPow2(5) == 4.
static inline int LastPow2(int n) {
// Dtype.h asserts sizeof(int) == 4.
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return ::max(1, n - (n >> 1));
}
void SetBlockDimension(int64_t dim0, int64_t dim1) {
int dim0_pow2 = dim0 < MAX_NUM_THREADS
? static_cast<int>(LastPow2(dim0))
: MAX_NUM_THREADS;
int dim1_pow2 = dim1 < MAX_NUM_THREADS
? static_cast<int>(LastPow2(dim1))
: MAX_NUM_THREADS;
block_width_ =
::min(dim0_pow2, CUDAState::GetInstance().GetWarpSize());
block_height_ =
::min(dim1_pow2, int(MAX_NUM_THREADS / block_width_));
block_width_ =
::min(dim0_pow2, int(MAX_NUM_THREADS / block_height_));
num_threads_ = block_width_ * block_height_;
}
int SplitInput(int parallelism) {
int step = step_input_;
step_input_ *= parallelism;
return step;
}
int SplitOutput(int parallelism) {
int step = step_output_;
step_output_ *= parallelism;
return step;
}
dim3 BlockDim() const { return dim3(block_width_, block_height_); }
dim3 GridDim() const {
return dim3(DivUp(num_outputs_, step_output_), ctas_per_output_);
}
OPEN3D_HOST_DEVICE bool ShouldBlockXReduce() const {
return input_mult_[BLOCK_X] != 0;
}
OPEN3D_HOST_DEVICE bool ShouldBlockYReduce() const {
return input_mult_[BLOCK_Y] != 0;
}
OPEN3D_HOST_DEVICE bool ShouldGlobalReduce() const {
return input_mult_[CTA] != 0;
}
OPEN3D_DEVICE bool ShouldStore(int output_idx) const {
return output_idx < num_outputs_ &&
(!ShouldBlockXReduce() || threadIdx.x == 0) &&
(!ShouldBlockYReduce() || threadIdx.y == 0);
}
OPEN3D_HOST_DEVICE int InputIdx() const {
int lane = threadIdx.x;
int warp = threadIdx.y;
int cta2 = blockIdx.y;
return (lane * input_mult_[BLOCK_X] + warp * input_mult_[BLOCK_Y] +
cta2 * input_mult_[CTA]);
}
OPEN3D_HOST_DEVICE int OutputIdx() const {
int lane = threadIdx.x;
int warp = threadIdx.y;
int cta1 = blockIdx.x;
return (lane * output_mult_[BLOCK_X] + warp * output_mult_[BLOCK_Y] +
cta1 * step_output_);
}
OPEN3D_DEVICE int SharedMemoryOffset(int offset) const {
return threadIdx.x + (threadIdx.y + offset) * blockDim.x;
}
OPEN3D_DEVICE int StagingMemoryOffset(int cta2) const {
int offset = cta2 + blockIdx.x * gridDim.y;
if (!ShouldBlockXReduce()) {
offset = threadIdx.x + offset * blockDim.x;
}
return offset;
}
int SharedMemorySize() const {
if (!ShouldBlockYReduce() &&
(!ShouldBlockXReduce() ||
block_width_ <= CUDAState::GetInstance().GetWarpSize())) {
return 0;
}
return element_size_bytes_ * num_threads_;
}
int64_t GlobalMemorySize() const {
if (!ShouldGlobalReduce()) {
return 0;
}
auto size =
(int64_t)element_size_bytes_ * num_outputs_ * ctas_per_output_;
if (!ShouldBlockXReduce()) {
size *= BlockDim().x;
}
return size;
}
int SemaphoreSize() const {
if (!ShouldGlobalReduce()) {
return 0;
}
return sizeof(int) * GridDim().x;
}
int ValuesPerThread() const {
return DivUp(num_inputs_per_output_, step_input_);
}
std::string ToString() const {
std::string input_mult_str = fmt::format(
"[{},{},{}]", input_mult_[0], input_mult_[1], input_mult_[2]);
std::string output_mult_str =
fmt::format("[{},{}]", output_mult_[0], output_mult_[1]);
std::string block_str = fmt::format("[{},{},{}]", BlockDim().x,
BlockDim().y, BlockDim().z);
std::string grid_str = fmt::format("[{},{},{}]", GridDim().x,
GridDim().y, GridDim().z);
std::string str = fmt::format(
"REDUCEConfig(element_size_bytes_={}, "
"num_inputs_per_output_={}, num_outputs_={}, "
"step_input_={}, step_output_={}, ctas_per_output_={}, "
"input_mult_={}, output_mult_={}, values_per_thread={}, "
"block={}, grid={}, global_memory_size={})",
element_size_bytes_, num_inputs_per_output_, num_outputs_,
step_input_, step_output_, ctas_per_output_, input_mult_str,
output_mult_str, ValuesPerThread(), block_str, grid_str,
GlobalMemorySize());
return str;
}
};
template <int nt, typename R>
OPEN3D_LAUNCH_BOUNDS_2(nt, 4)
__global__ void ReduceKernel(R reduction) {
reduction.Run();
}
template <typename index_t>
static OffsetCalculator<2, index_t> MakeOutputCalculator(
const Indexer& indexer) {
int num_reduction_dims = indexer.NumReductionDims();
int num_output_dims = indexer.NumDims() - num_reduction_dims;
std::array<const int64_t*, 2> strides = {
indexer.GetOutput().byte_strides_ + num_reduction_dims,
indexer.GetInput(0).byte_strides_ + num_reduction_dims,
};
const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims;
return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data());
}
template <typename index_t>
static OffsetCalculator<1, index_t> MakeInputCalculator(
const Indexer& indexer) {
int num_reduction_dims = indexer.NumReductionDims();
std::array<const int64_t*, 1> strides = {
indexer.GetInput(0).byte_strides_,
};
return OffsetCalculator<1, index_t>(
num_reduction_dims, indexer.GetMasterShape(), strides.data());
}
template <int vt, typename index_t, typename func_t>
OPEN3D_DEVICE void StridedIterate(func_t f,
index_t begin,
index_t end,
index_t stride) {
if (begin + (vt - 1) * stride < end) {
#pragma unroll
for (index_t i = 0; i < vt; i++) {
f(i, begin + i * stride);
}
} else {
#pragma unroll
for (index_t i = 0; i < vt; i++) {
index_t idx = begin + i * stride;
if (idx < end) {
f(i, idx);
}
}
}
}
/// Combine() and Reduce() are the same for regular reduction ops.
template <typename out_scalar_t, typename func_t>
class RegularReduceOps {
using arg_t = typename BinaryFunctionTraits<func_t>::arg0_t;
using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
public:
RegularReduceOps(const func_t& op) : reduce_func_(op) {}
static inline OPEN3D_DEVICE out_scalar_t Project(arg_t arg) {
return (out_scalar_t)arg;
}
static inline OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
return WARP_SHFL_DOWN(arg, offset);
}
OPEN3D_DEVICE inline arg_t Combine(arg_t acc, scalar_t val) const {
return reduce_func_(acc, val);
}
/// Idx is ignored for RegularReduceOps.
OPEN3D_DEVICE inline arg_t Reduce(arg_t acc,
scalar_t val,
int64_t idx) const {
return reduce_func_(acc, val);
}
private:
func_t reduce_func_ = nullptr;
};
template <typename scalar_t, typename func_t>
RegularReduceOps<scalar_t, func_t> WrapRegularReduceOps(const func_t& op) {
return RegularReduceOps<scalar_t, func_t>{op};
}
template <typename func_t>
class ArgReduceOps {
using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
using index_t = int64_t;
using arg_t = thrust::pair<scalar_t, index_t>;
public:
ArgReduceOps(const func_t comp_func) : comp_func_(comp_func) {}
static OPEN3D_DEVICE index_t Project(arg_t arg) { return arg.second; }
static OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
return arg_t(WARP_SHFL_DOWN(arg.first, offset),
WARP_SHFL_DOWN(arg.second, offset));
}
/// Combine(pair<val_t, idx_t>, pair<val_t, idx_t>) -> pair<val_t, idx_t>.
/// Called at subsequent rounds of reduction, when values are already
/// associated with indices.
OPEN3D_DEVICE inline arg_t Combine(arg_t a, arg_t b) const {
return comp_func_(a.first, b.first) ? a : b;
}
/// Reduce(pair<val_t, idx_t>, val_t, idx_t) -> pair<val_t, idx_t>.
/// Called at the first round of reduction, when values are not yet
/// associated with indices.
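    /// Example (ArgMin, comp_func = "a < b"): Reduce({3.0f, 7}, 1.5f, 12)
    /// returns {1.5f, 12}, and Combine({1.5f, 12}, {2.0f, 3}) keeps {1.5f, 12}.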
OPEN3D_DEVICE inline arg_t Reduce(arg_t arg,
scalar_t val,
int64_t idx) const {
return comp_func_(arg.first, val) ? arg : arg_t(val, idx);
}
private:
func_t comp_func_ = nullptr;
};
template <typename func_t>
ArgReduceOps<func_t> WrapArgReduceOps(const func_t& comp_func) {
return ArgReduceOps<func_t>{comp_func};
}
template <typename scalar_t,
typename ops_t,
typename index_t,
typename out_scalar_t = scalar_t,
int vt0 = 4>
class ReduceOp {
using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
using arg_t =
typename std::decay<typename traits::template arg<0>::type>::type;
using InputCalculator = OffsetCalculator<1, index_t>;
using OutputCalculator = OffsetCalculator<2, index_t>;
public:
ReduceOp(ops_t ops,
ReduceConfig config,
InputCalculator input_calc,
OutputCalculator output_calc,
const void* src,
char* dst,
void* acc_buf,
void* cta_buf,
int* semaphores,
arg_t identity,
bool accumulate,
bool final_output)
: ops_(ops),
config_(config),
input_calc_(input_calc),
output_calc_(output_calc),
src_(src),
dst_(dst),
acc_buf_(acc_buf),
cta_buf_(cta_buf),
semaphores_(semaphores),
identity_(identity),
accumulate_(accumulate),
final_output_(final_output) {}
OPEN3D_DEVICE void Run() const {
extern __shared__ char shared_memory[];
index_t output_idx = config_.OutputIdx();
index_t input_idx = config_.InputIdx();
auto base_offsets = output_calc_.get(output_idx);
arg_t value = identity_;
if (output_idx < config_.num_outputs_ &&
input_idx < config_.num_inputs_per_output_) {
auto input_slice = (const char*)src_ + base_offsets[1];
value = ThreadReduce((const scalar_t*)input_slice);
}
if (config_.ShouldBlockYReduce()) {
value = BlockYReduce(value, shared_memory);
}
if (config_.ShouldBlockXReduce()) {
value = BlockXReduce(value, shared_memory);
}
auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
arg_t* acc = nullptr;
if (acc_buf_ != nullptr) {
int64_t numerator = (int64_t)sizeof(arg_t);
int64_t denominator = (int64_t)sizeof(out_scalar_t);
ReduceFraction(numerator, denominator);
acc = (arg_t*)((char*)acc_buf_ +
(base_offsets[0] * numerator / denominator));
}
if (config_.ShouldGlobalReduce()) {
value = GlobalReduce(value, acc, shared_memory);
} else if (config_.ShouldStore(output_idx)) {
if (acc == nullptr) {
if (accumulate_) {
value = AccumulateInOutput<can_accumulate_in_output>(out,
value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*out = GetAccumulatedOutput<can_accumulate_in_output>(
out, value);
}
} else {
if (accumulate_) {
value = ops_.Combine(*acc, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*acc = value;
}
}
}
}
OPEN3D_DEVICE arg_t ThreadReduce(const scalar_t* data) const {
index_t idx = config_.InputIdx();
// Multiple accumulators to remove dependency between unrolled loops.
arg_t value_list[vt0];
#pragma unroll
for (int i = 0; i < vt0; i++) {
value_list[i] = identity_;
}
index_t end = config_.num_inputs_per_output_;
index_t stride = config_.step_input_;
index_t element_stride = input_calc_.strides_[0][0] / sizeof(scalar_t);
        // Keep the call depth shallow so the compiler can properly unroll the
        // loop and expose instruction-level parallelism.
while (idx < config_.num_inputs_per_output_) {
// load input
SmallArray<scalar_t, vt0> values;
if (input_calc_.dims_ == 1) {
StridedIterate<vt0>(
[&](index_t i, index_t idx) {
values[i] = data[idx * element_stride];
},
idx, end, stride);
} else {
StridedIterate<vt0>(
[&](index_t i, index_t idx) {
values[i] = data[input_calc_.get(idx)[0] /
sizeof(scalar_t)];
},
idx, end, stride);
}
// compute
StridedIterate<vt0, index_t>(
[&](index_t i, index_t idx) {
value_list[i] =
ops_.Reduce(value_list[i], values[i], idx);
},
idx, config_.num_inputs_per_output_, config_.step_input_);
// step offset
idx += config_.step_input_ * vt0;
}
#pragma unroll
for (int i = 1; i < vt0; i++) {
value_list[0] = ops_.Combine(value_list[0], value_list[i]);
}
return value_list[0];
}
OPEN3D_DEVICE arg_t BlockXReduce(arg_t value, char* shared_memory) const {
int dim_x = blockDim.x;
arg_t* shared = (arg_t*)shared_memory;
if (dim_x > warpSize) {
int address_base = threadIdx.x + threadIdx.y * blockDim.x;
shared[address_base] = value;
for (int offset = dim_x / 2; offset >= warpSize; offset >>= 1) {
__syncthreads();
if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) {
arg_t other = shared[address_base + offset];
value = ops_.Combine(value, other);
shared[address_base] = value;
}
}
dim_x = warpSize;
}
__syncthreads();
for (int offset = 1; offset < dim_x; offset <<= 1) {
arg_t other = ops_.WarpShflDown(value, offset);
value = ops_.Combine(value, other);
}
return value;
}
OPEN3D_DEVICE arg_t BlockYReduce(arg_t value, char* shared_memory) const {
arg_t* shared = (arg_t*)shared_memory;
shared[config_.SharedMemoryOffset(0)] = value;
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
arg_t other = shared[config_.SharedMemoryOffset(offset)];
value = ops_.Combine(value, other);
shared[config_.SharedMemoryOffset(0)] = value;
}
}
return value;
}
OPEN3D_DEVICE bool MarkBlockFinished() const {
__shared__ bool is_last_block_done_shared;
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
int prev_blocks_finished = atomicAdd(&semaphores_[blockIdx.x], 1);
is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1);
}
__syncthreads();
return is_last_block_done_shared;
}
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<can_acc>::type* = nullptr) const {
return ops_.Combine(*out, value);
}
// This function should never be called --
// it's the version of `AccumulateInOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
out_scalar_t*,
arg_t,
typename std::enable_if<!can_acc>::type* = nullptr) const {
OPEN3D_ASSERT(false);
return arg_t{};
}
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<can_acc>::type* = nullptr) const {
OPEN3D_ASSERT(!final_output_);
return (out_scalar_t)value;
}
// This function should never be called --
// it's the version of `GetAccumulatedOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<!can_acc>::type* = nullptr) const {
OPEN3D_ASSERT(false);
return *out;
}
template <class T>
OPEN3D_DEVICE void SetResults(const T x, const index_t base_offset) const {
auto res = (out_scalar_t*)((char*)dst_ + base_offset);
*res = x;
}
OPEN3D_DEVICE void SetResultsToOutput(arg_t value,
index_t base_offset) const {
OPEN3D_ASSERT(final_output_);
SetResults(ops_.Project(value), base_offset);
}
OPEN3D_DEVICE arg_t GlobalReduce(arg_t value,
arg_t* acc,
char* shared_memory) const {
arg_t* reduce_buffer = (arg_t*)cta_buf_;
index_t output_idx = config_.OutputIdx();
auto base_offsets = output_calc_.get(output_idx);
auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
bool should_store = config_.ShouldStore(config_.OutputIdx());
if (should_store) {
index_t offset = config_.StagingMemoryOffset(blockIdx.y);
reduce_buffer[offset] = value;
}
__threadfence(); // make sure writes are globally visible
__syncthreads(); // if multiple warps in this block wrote to staging,
// make sure they're all done
bool is_last_block_done = MarkBlockFinished();
if (is_last_block_done) {
value = identity_;
if (config_.ShouldBlockXReduce()) {
index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
index_t step = blockDim.x * blockDim.y;
for (; input_offset < config_.ctas_per_output_;
input_offset += step) {
index_t idx = config_.StagingMemoryOffset(input_offset);
arg_t next = reduce_buffer[idx];
value = ops_.Combine(value, next);
}
} else {
index_t input_offset = threadIdx.y;
index_t step = blockDim.y;
for (; input_offset < config_.ctas_per_output_;
input_offset += step) {
index_t idx = config_.StagingMemoryOffset(input_offset);
arg_t next = reduce_buffer[idx];
value = ops_.Combine(value, next);
}
}
value = BlockYReduce(value, shared_memory);
if (config_.ShouldBlockXReduce()) {
value = BlockXReduce(value, shared_memory);
}
if (should_store) {
if (acc == nullptr) {
if (accumulate_) {
value = AccumulateInOutput<can_accumulate_in_output>(
out, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*out = GetAccumulatedOutput<can_accumulate_in_output>(
out, value);
}
} else {
if (accumulate_) {
value = ops_.Combine(*acc, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*acc = value;
}
}
}
}
return value;
}
private:
static constexpr bool can_accumulate_in_output =
std::is_convertible<arg_t, out_scalar_t>::value &&
std::is_convertible<out_scalar_t, arg_t>::value;
static constexpr float acc_buffer_multiplier =
(float)sizeof(arg_t) / sizeof(out_scalar_t);
ops_t ops_;
ReduceConfig config_;
InputCalculator input_calc_;
OutputCalculator output_calc_;
const void* src_;
const char* dst_;
    // acc_buf_ is used for accumulation across sub-indexers when accumulating
    // directly in the output is not permissible
void* acc_buf_;
// cta_buf_ used for accumulation between blocks during global reduction
void* cta_buf_;
int* semaphores_;
arg_t identity_;
bool accumulate_;
bool final_output_;
};
class AccumulationBuffer {
public:
AccumulationBuffer() {}
AccumulationBuffer(int64_t acc_t_size,
int64_t out_t_size,
char* out_ptr,
int64_t size) {
out_ptr_ = (char*)out_ptr;
if (out_t_size >= acc_t_size) {
// reusing output buffer for accumulation.
acc_ptr_ = (char*)out_ptr;
numerator_ = 1;
denominator_ = 1;
} else {
int device_id = CUDAState::GetInstance().GetCurrentDeviceID();
Device device(Device::DeviceType::CUDA, device_id);
buffer_ = std::make_unique<Blob>(size, device);
acc_ptr_ = (char*)buffer_->GetDataPtr();
numerator_ = acc_t_size;
denominator_ = out_t_size;
ReduceFraction(numerator_, denominator_);
}
}
char* GetAccSlice(char* out_ptr) {
if (numerator_ == -1 || acc_ptr_ == nullptr) {
return nullptr;
}
return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_);
}
private:
std::unique_ptr<Blob> buffer_;
char* acc_ptr_ = nullptr;
char* out_ptr_ = nullptr;
float size_factor_ = -1;
int64_t numerator_ = -1;
int64_t denominator_ = -1;
};
class CUDAReductionEngine {
public:
CUDAReductionEngine(const CUDAReductionEngine&) = delete;
CUDAReductionEngine& operator=(const CUDAReductionEngine&) = delete;
CUDAReductionEngine(const Indexer& indexer) : indexer_(indexer) {}
template <typename func_t, typename scalar_t>
void Run(const func_t& reduce_func, scalar_t identity) {
if (indexer_.NumWorkloads() == 0) {
utility::LogError(
"0-sized input should be handled outside of the reudction "
"engine.");
}
if (indexer_.NumInputs() != 1) {
utility::LogError("Reduction op must have exactly one input.");
}
OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t);
using arg0_t = typename BinaryFunctionTraits<func_t>::arg0_t;
using arg1_t = typename BinaryFunctionTraits<func_t>::arg1_t;
if (!std::is_same<scalar_t, arg0_t>::value ||
!std::is_same<scalar_t, arg1_t>::value) {
utility::LogError(
"Function input type must match with the identity's type.");
}
using res_t = typename BinaryFunctionTraits<func_t>::res_t;
if (std::is_same<res_t, bool>::value) {
// func_t is a comparison function (for arg-reduction).
// Signature: (scalar_t, scalar_t) -> bool.
RunReduce<scalar_t, int64_t>(
indexer_, WrapArgReduceOps(reduce_func),
thrust::pair<scalar_t, int64_t>(identity, 0));
} else {
// func_t is a regular reduction function.
// Signature: (scalar_t, scalar_t) -> scalar_t.
RunReduce<scalar_t, scalar_t>(
indexer_, WrapRegularReduceOps<scalar_t>(reduce_func),
identity);
}
}
private:
/// If the index cannot be represented in 32 bits, RunReduce calls itself
/// recursively.
template <typename scalar_t,
typename out_scalar_t,
int vt0 = 4,
typename ops_t,
typename ident_t>
static void RunReduce(Indexer& indexer,
const ops_t& ops,
ident_t identity,
AccumulationBuffer* acc_buf_ptr = nullptr) {
using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
using arg_t = typename traits::template arg<0>::type;
static constexpr bool can_accumulate_in_output =
std::is_convertible<arg_t, out_scalar_t>::value;
bool can_use_32bit_indexing = indexer.CanUse32BitIndexing();
std::unique_ptr<AccumulationBuffer> owned_buf_ptr;
        // acc_buf_ptr is shared across the recursion: it is created on the
        // first entry and reused by all recursive function calls.
if (acc_buf_ptr == nullptr) {
// acc_buf_ptr holds buffer used for accumulation among multiple
// sub_iter when accumulation in output is not possible.
if (!can_accumulate_in_output && !can_use_32bit_indexing) {
int64_t output_memory_size = 1;
for (int dim = 0; dim < indexer.NumDims(); dim++) {
output_memory_size = ::max(
output_memory_size,
indexer.GetMasterShape()[dim] *
indexer.GetOutput().byte_strides_[dim]);
}
owned_buf_ptr.reset(new AccumulationBuffer(
sizeof(arg_t), sizeof(out_scalar_t),
(char*)indexer.GetOutput().data_ptr_,
output_memory_size * sizeof(arg_t)));
} else {
owned_buf_ptr.reset(new AccumulationBuffer());
}
acc_buf_ptr = owned_buf_ptr.get();
}
if (!can_use_32bit_indexing) {
for (auto& sub_indexer : indexer.SplitTo32BitIndexing()) {
RunReduce<scalar_t, out_scalar_t, vt0>(sub_indexer, ops,
identity, acc_buf_ptr);
}
return;
}
ReduceConfig config(sizeof(arg_t), indexer);
std::unique_ptr<Blob> buffer_blob;
std::unique_ptr<Blob> semaphores_blob;
void* buffer = nullptr;
void* semaphores = nullptr;
if (config.ShouldGlobalReduce()) {
int device_id = CUDAState::GetInstance().GetCurrentDeviceID();
Device device(Device::DeviceType::CUDA, device_id);
buffer_blob =
std::make_unique<Blob>(config.GlobalMemorySize(), device);
semaphores_blob =
std::make_unique<Blob>(config.SemaphoreSize(), device);
buffer = buffer_blob->GetDataPtr();
semaphores = semaphores_blob->GetDataPtr();
OPEN3D_CUDA_CHECK(
hipMemset(semaphores, 0, config.SemaphoreSize()));
}
OPEN3D_ASSERT(can_use_32bit_indexing);
const char* in_data = (char*)indexer.GetInput(0).data_ptr_;
char* out_data = (char*)indexer.GetOutput().data_ptr_;
char* acc_data = acc_buf_ptr->GetAccSlice(out_data);
auto output_calc = MakeOutputCalculator<uint32_t>(indexer);
auto input_calc = MakeInputCalculator<uint32_t>(indexer);
auto reduce_op = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>(
ops, config, input_calc, output_calc, in_data, out_data,
acc_data, buffer, (int*)semaphores, identity,
indexer.ShouldAccumulate(), indexer.IsFinalOutput());
// Launch reduce kernel
int shared_memory = config.SharedMemorySize();
hipLaunchKernelGGL(( ReduceKernel<ReduceConfig::MAX_NUM_THREADS>)
, dim3(config.GridDim()), dim3(config.BlockDim()), shared_memory,
core::cuda::GetStream(), reduce_op);
cuda::Synchronize();
OPEN3D_CUDA_CHECK(hipGetLastError());
}
private:
Indexer indexer_;
};
void ReductionCUDA(const Tensor& src,
Tensor& dst,
const SizeVector& dims,
bool keepdim,
ReductionOpCode op_code) {
if (s_regular_reduce_ops.find(op_code) != s_regular_reduce_ops.end()) {
Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
CUDAReductionEngine re(indexer);
Dtype dtype = src.GetDtype();
CUDAScopedDevice scoped_device(src.GetDevice());
DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
switch (op_code) {
case ReductionOpCode::Sum:
if (indexer.NumWorkloads() == 0) {
// 0-sized input can be reduced to non-0-sized outputs,
// where identity elements should be filled.
// E.g. np.sum(np.ones((0, 5)), axis=0).shape == (5,).
dst.Fill(0);
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a + b; },
static_cast<scalar_t>(0));
}
break;
case ReductionOpCode::Prod:
if (indexer.NumWorkloads() == 0) {
dst.Fill(1);
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a * b; },
static_cast<scalar_t>(1));
}
break;
case ReductionOpCode::Min:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport Min.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a < b ? a : b; },
static_cast<scalar_t>(
std::numeric_limits<scalar_t>::max()));
}
break;
case ReductionOpCode::Max:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport Max.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a > b ? a : b; },
static_cast<scalar_t>(std::numeric_limits<
scalar_t>::lowest()));
}
break;
default:
utility::LogError("Unsupported op code.");
break;
}
});
} else if (s_arg_reduce_ops.find(op_code) != s_arg_reduce_ops.end()) {
if (dst.GetDtype() != core::Int64) {
utility::LogError("Arg-reduction must have int64 output dtype.");
}
Indexer indexer({src}, dst, DtypePolicy::INPUT_SAME, dims);
CUDAReductionEngine re(indexer);
Dtype dtype = src.GetDtype();
CUDAScopedDevice scoped_device(src.GetDevice());
DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
switch (op_code) {
case ReductionOpCode::ArgMin:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport ArgMin.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> bool { return a < b; },
static_cast<scalar_t>(
std::numeric_limits<scalar_t>::max()));
}
break;
case ReductionOpCode::ArgMax:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport ArgMax.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> bool { return a > b; },
static_cast<scalar_t>(std::numeric_limits<
scalar_t>::lowest()));
}
break;
default:
utility::LogError("Unsupported op code.");
break;
}
});
} else if (s_boolean_reduce_ops.find(op_code) !=
s_boolean_reduce_ops.end()) {
if (src.GetDtype() != core::Bool) {
utility::LogError(
"Boolean reduction only supports boolean input tensor.");
}
if (dst.GetDtype() != core::Bool) {
utility::LogError(
"Boolean reduction only supports boolean output tensor.");
}
Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
CUDAReductionEngine re(indexer);
CUDAScopedDevice scoped_device(src.GetDevice());
switch (op_code) {
case ReductionOpCode::All:
if (indexer.NumWorkloads() == 0) {
dst.Fill(true);
} else {
re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
-> uint8_t { return a && b; },
static_cast<uint8_t>(true));
}
break;
case ReductionOpCode::Any:
if (indexer.NumWorkloads() == 0) {
dst.Fill(false);
} else {
re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
-> uint8_t { return a || b; },
static_cast<uint8_t>(false));
}
break;
default:
utility::LogError("Unsupported op code.");
break;
}
} else {
utility::LogError("Unsupported op code.");
}
}
} // namespace kernel
} // namespace core
} // namespace open3d
| 8786db8e2f387e50793216ccb8573f599abf2778.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <array>
#include <limits>
#include <sstream>
#include <tuple>
#include <type_traits>
#include "open3d/core/Blob.h"
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Device.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/FunctionTraits.h"
#include "open3d/core/Indexer.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/ParallelFor.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/Reduction.h"
#include "open3d/utility/Logging.h"
// CUDA reduction is based on PyTorch's CUDA reduction implementation.
// See: aten/src/ATen/native/cuda/Reduce.cuh
#if __CUDA_ARCH__ >= 750
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024;
#else
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048;
#endif
constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024;
constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
#define OPEN3D_MAX_THREADS_PER_BLOCK(val) \
(((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \
: CUDA_THREADS_PER_BLOCK_FALLBACK)
#define OPEN3D_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \
((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \
? (blocks_per_sm) \
: ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \
(threads_per_block))))
#define OPEN3D_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \
__launch_bounds__((OPEN3D_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \
(OPEN3D_MIN_BLOCKS_PER_SM((max_threads_per_block), \
(min_blocks_per_sm))))
template <typename T>
OPEN3D_DEVICE __forceinline__ T WARP_SHFL_DOWN(T value,
unsigned int delta,
int width = warpSize,
unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
return __shfl_down_sync(mask, value, delta, width);
#else
return __shfl_down(value, delta, width);
#endif
}
namespace open3d {
namespace core {
namespace kernel {
static inline int64_t DivUp(int64_t a, int64_t b) { return (a + b - 1) / b; }
// Returns reduced fraction numerator & denominator
OPEN3D_HOST_DEVICE static void ReduceFraction(int64_t& numerator,
int64_t& denominator) {
// Get GCD of num and denom using Euclid's algorithm.
// Can replace this with std::gcd if we ever support c++17.
int64_t a = denominator;
int64_t b = numerator;
while (b != 0) {
a %= b;
int64_t tmp = a;
a = b;
b = tmp;
}
// a is now the GCD
numerator /= a;
denominator /= a;
}
class ReduceConfig {
public:
static constexpr int BLOCK_X = 0;
static constexpr int BLOCK_Y = 1;
static constexpr int CTA = 2;
static constexpr int MAX_NUM_THREADS = 512;
int num_inputs_per_output_;
int num_outputs_;
int step_input_ = 1;
int step_output_ = 1;
int ctas_per_output_ = 1;
private:
int element_size_bytes_;
int input_mult_[3] = {0, 0, 0};
int output_mult_[2] = {0, 0};
int block_width_;
int block_height_;
int num_threads_;
public:
ReduceConfig(int element_size_bytes, const Indexer& indexer)
: element_size_bytes_(element_size_bytes) {
num_outputs_ = indexer.NumOutputElements();
num_inputs_per_output_ = indexer.NumWorkloads() / num_outputs_;
// Adjust block size to map block width to fastest changing dimension of
// input tensor. This grants the best possible memory accessing pattern,
// given that for non-contiguous tensor with space in between, we cannot
// have perfect memory coalescing.
bool reduction_on_fastest_striding_dimension =
(indexer.NumReductionDims() == indexer.NumDims()) ||
(indexer.GetInput(0).byte_strides_[0] <
indexer.GetInput(0).byte_strides_[indexer.NumReductionDims()]);
// Notice that dim0 & dim1 does NOT guarantee any launch configuration
// here! dim0 & dim1 are more like the upper bound of the block
// dimension. The actual launch config and reduction scheme is
// determined by setting values to `input_mult_` and
// `output_mult_`. We try to max out dim1 so that we have enough
// threads per CTA to deliver performance for larger problem size.
int64_t dim0;
int64_t dim1;
if (reduction_on_fastest_striding_dimension) {
// Map block.x to the fastest reducing dimension. It implies:
// 1. BlockXReduce is required.
// 2. block.y now max out to num_outputs.
dim0 = indexer.GetMasterShape()[0];
dim1 = num_outputs_;
} else {
// Map block.x to the fastest non reducing dimension. It implies:
// 1. BlockXReduce is turned off.
// 2. block.y now max out to num_inputs_per_output_.
dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()];
dim1 = num_inputs_per_output_;
}
// Adjust block_width and block_height
SetBlockDimension(dim0, dim1);
int block_width = block_width_;
int block_height = block_height_;
if (indexer.NumDims() == 0 || reduction_on_fastest_striding_dimension) {
// Split the input across lanes if the input is contiguous in the
// reduced dimension. This will require reduction between threads
// using warp shuffle instructions and shared memory (if
// block_width > warpSize).
input_mult_[0] = SplitInput(block_width);
} else {
// Otherwise split the output across lanes in a warp.
output_mult_[0] = SplitOutput(block_width);
}
if (ValuesPerThread() >= block_height * 16 ||
ValuesPerThread() >= 256) {
// Divide the input across warps in a thread-block, if that leaves
// at least 16 elements to be summed by each thread. This will
// require inter-warp reduction using shared memory.
input_mult_[1] = SplitInput(block_height);
} else {
// Otherwise, each warp handles a separate output.
output_mult_[1] = SplitOutput(block_height);
}
if (input_mult_[1] != 0 && ValuesPerThread() >= 256 &&
num_outputs_ <= 4096) {
// Divide the input across thread-blocks if the amount of work
// per-thread is large enough and the size of the output is small
// enough. This will require a reduction using global memory.
ctas_per_output_ = DivUp(ValuesPerThread(), 16);
if (ctas_per_output_ > 65535) {
ctas_per_output_ = 65535;
}
input_mult_[2] = SplitInput(ctas_per_output_);
}
}
    /// Returns the largest power of two that is <= n (at least 1),
    /// e.g. LastPow2(5) == 4.
static inline int LastPow2(int n) {
// Dtype.h asserts sizeof(int) == 4.
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return std::max(1, n - (n >> 1));
}
void SetBlockDimension(int64_t dim0, int64_t dim1) {
int dim0_pow2 = dim0 < MAX_NUM_THREADS
? static_cast<int>(LastPow2(dim0))
: MAX_NUM_THREADS;
int dim1_pow2 = dim1 < MAX_NUM_THREADS
? static_cast<int>(LastPow2(dim1))
: MAX_NUM_THREADS;
block_width_ =
std::min(dim0_pow2, CUDAState::GetInstance().GetWarpSize());
block_height_ =
std::min(dim1_pow2, int(MAX_NUM_THREADS / block_width_));
block_width_ =
std::min(dim0_pow2, int(MAX_NUM_THREADS / block_height_));
num_threads_ = block_width_ * block_height_;
}
int SplitInput(int parallelism) {
int step = step_input_;
step_input_ *= parallelism;
return step;
}
int SplitOutput(int parallelism) {
int step = step_output_;
step_output_ *= parallelism;
return step;
}
dim3 BlockDim() const { return dim3(block_width_, block_height_); }
dim3 GridDim() const {
return dim3(DivUp(num_outputs_, step_output_), ctas_per_output_);
}
OPEN3D_HOST_DEVICE bool ShouldBlockXReduce() const {
return input_mult_[BLOCK_X] != 0;
}
OPEN3D_HOST_DEVICE bool ShouldBlockYReduce() const {
return input_mult_[BLOCK_Y] != 0;
}
OPEN3D_HOST_DEVICE bool ShouldGlobalReduce() const {
return input_mult_[CTA] != 0;
}
OPEN3D_DEVICE bool ShouldStore(int output_idx) const {
return output_idx < num_outputs_ &&
(!ShouldBlockXReduce() || threadIdx.x == 0) &&
(!ShouldBlockYReduce() || threadIdx.y == 0);
}
OPEN3D_HOST_DEVICE int InputIdx() const {
int lane = threadIdx.x;
int warp = threadIdx.y;
int cta2 = blockIdx.y;
return (lane * input_mult_[BLOCK_X] + warp * input_mult_[BLOCK_Y] +
cta2 * input_mult_[CTA]);
}
OPEN3D_HOST_DEVICE int OutputIdx() const {
int lane = threadIdx.x;
int warp = threadIdx.y;
int cta1 = blockIdx.x;
return (lane * output_mult_[BLOCK_X] + warp * output_mult_[BLOCK_Y] +
cta1 * step_output_);
}
OPEN3D_DEVICE int SharedMemoryOffset(int offset) const {
return threadIdx.x + (threadIdx.y + offset) * blockDim.x;
}
OPEN3D_DEVICE int StagingMemoryOffset(int cta2) const {
int offset = cta2 + blockIdx.x * gridDim.y;
if (!ShouldBlockXReduce()) {
offset = threadIdx.x + offset * blockDim.x;
}
return offset;
}
int SharedMemorySize() const {
if (!ShouldBlockYReduce() &&
(!ShouldBlockXReduce() ||
block_width_ <= CUDAState::GetInstance().GetWarpSize())) {
return 0;
}
return element_size_bytes_ * num_threads_;
}
int64_t GlobalMemorySize() const {
if (!ShouldGlobalReduce()) {
return 0;
}
auto size =
(int64_t)element_size_bytes_ * num_outputs_ * ctas_per_output_;
if (!ShouldBlockXReduce()) {
size *= BlockDim().x;
}
return size;
}
int SemaphoreSize() const {
if (!ShouldGlobalReduce()) {
return 0;
}
return sizeof(int) * GridDim().x;
}
int ValuesPerThread() const {
return DivUp(num_inputs_per_output_, step_input_);
}
std::string ToString() const {
std::string input_mult_str = fmt::format(
"[{},{},{}]", input_mult_[0], input_mult_[1], input_mult_[2]);
std::string output_mult_str =
fmt::format("[{},{}]", output_mult_[0], output_mult_[1]);
std::string block_str = fmt::format("[{},{},{}]", BlockDim().x,
BlockDim().y, BlockDim().z);
std::string grid_str = fmt::format("[{},{},{}]", GridDim().x,
GridDim().y, GridDim().z);
std::string str = fmt::format(
"REDUCEConfig(element_size_bytes_={}, "
"num_inputs_per_output_={}, num_outputs_={}, "
"step_input_={}, step_output_={}, ctas_per_output_={}, "
"input_mult_={}, output_mult_={}, values_per_thread={}, "
"block={}, grid={}, global_memory_size={})",
element_size_bytes_, num_inputs_per_output_, num_outputs_,
step_input_, step_output_, ctas_per_output_, input_mult_str,
output_mult_str, ValuesPerThread(), block_str, grid_str,
GlobalMemorySize());
return str;
}
};
template <int nt, typename R>
OPEN3D_LAUNCH_BOUNDS_2(nt, 4)
__global__ void ReduceKernel(R reduction) {
reduction.Run();
}
template <typename index_t>
static OffsetCalculator<2, index_t> MakeOutputCalculator(
const Indexer& indexer) {
int num_reduction_dims = indexer.NumReductionDims();
int num_output_dims = indexer.NumDims() - num_reduction_dims;
std::array<const int64_t*, 2> strides = {
indexer.GetOutput().byte_strides_ + num_reduction_dims,
indexer.GetInput(0).byte_strides_ + num_reduction_dims,
};
const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims;
return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data());
}
template <typename index_t>
static OffsetCalculator<1, index_t> MakeInputCalculator(
const Indexer& indexer) {
int num_reduction_dims = indexer.NumReductionDims();
std::array<const int64_t*, 1> strides = {
indexer.GetInput(0).byte_strides_,
};
return OffsetCalculator<1, index_t>(
num_reduction_dims, indexer.GetMasterShape(), strides.data());
}
template <int vt, typename index_t, typename func_t>
OPEN3D_DEVICE void StridedIterate(func_t f,
index_t begin,
index_t end,
index_t stride) {
if (begin + (vt - 1) * stride < end) {
#pragma unroll
for (index_t i = 0; i < vt; i++) {
f(i, begin + i * stride);
}
} else {
#pragma unroll
for (index_t i = 0; i < vt; i++) {
index_t idx = begin + i * stride;
if (idx < end) {
f(i, idx);
}
}
}
}
/// Combine() and Reduce() are the same for regular reduction ops.
template <typename out_scalar_t, typename func_t>
class RegularReduceOps {
using arg_t = typename BinaryFunctionTraits<func_t>::arg0_t;
using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
public:
RegularReduceOps(const func_t& op) : reduce_func_(op) {}
static inline OPEN3D_DEVICE out_scalar_t Project(arg_t arg) {
return (out_scalar_t)arg;
}
static inline OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
return WARP_SHFL_DOWN(arg, offset);
}
OPEN3D_DEVICE inline arg_t Combine(arg_t acc, scalar_t val) const {
return reduce_func_(acc, val);
}
/// Idx is ignored for RegularReduceOps.
OPEN3D_DEVICE inline arg_t Reduce(arg_t acc,
scalar_t val,
int64_t idx) const {
return reduce_func_(acc, val);
}
private:
func_t reduce_func_ = nullptr;
};
template <typename scalar_t, typename func_t>
RegularReduceOps<scalar_t, func_t> WrapRegularReduceOps(const func_t& op) {
return RegularReduceOps<scalar_t, func_t>{op};
}
template <typename func_t>
class ArgReduceOps {
using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
using index_t = int64_t;
using arg_t = thrust::pair<scalar_t, index_t>;
public:
ArgReduceOps(const func_t comp_func) : comp_func_(comp_func) {}
static OPEN3D_DEVICE index_t Project(arg_t arg) { return arg.second; }
static OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
return arg_t(WARP_SHFL_DOWN(arg.first, offset),
WARP_SHFL_DOWN(arg.second, offset));
}
/// Combine(pair<val_t, idx_t>, pair<val_t, idx_t>) -> pair<val_t, idx_t>.
/// Called at subsequent rounds of reduction, when values are already
/// associated with indices.
OPEN3D_DEVICE inline arg_t Combine(arg_t a, arg_t b) const {
return comp_func_(a.first, b.first) ? a : b;
}
/// Reduce(pair<val_t, idx_t>, val_t, idx_t) -> pair<val_t, idx_t>.
/// Called at the first round of reduction, when values are not yet
/// associated with indices.
OPEN3D_DEVICE inline arg_t Reduce(arg_t arg,
scalar_t val,
int64_t idx) const {
return comp_func_(arg.first, val) ? arg : arg_t(val, idx);
}
private:
func_t comp_func_ = nullptr;
};
template <typename func_t>
ArgReduceOps<func_t> WrapArgReduceOps(const func_t& comp_func) {
return ArgReduceOps<func_t>{comp_func};
}
template <typename scalar_t,
typename ops_t,
typename index_t,
typename out_scalar_t = scalar_t,
int vt0 = 4>
class ReduceOp {
using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
using arg_t =
typename std::decay<typename traits::template arg<0>::type>::type;
using InputCalculator = OffsetCalculator<1, index_t>;
using OutputCalculator = OffsetCalculator<2, index_t>;
public:
ReduceOp(ops_t ops,
ReduceConfig config,
InputCalculator input_calc,
OutputCalculator output_calc,
const void* src,
char* dst,
void* acc_buf,
void* cta_buf,
int* semaphores,
arg_t identity,
bool accumulate,
bool final_output)
: ops_(ops),
config_(config),
input_calc_(input_calc),
output_calc_(output_calc),
src_(src),
dst_(dst),
acc_buf_(acc_buf),
cta_buf_(cta_buf),
semaphores_(semaphores),
identity_(identity),
accumulate_(accumulate),
final_output_(final_output) {}
OPEN3D_DEVICE void Run() const {
extern __shared__ char shared_memory[];
index_t output_idx = config_.OutputIdx();
index_t input_idx = config_.InputIdx();
auto base_offsets = output_calc_.get(output_idx);
arg_t value = identity_;
if (output_idx < config_.num_outputs_ &&
input_idx < config_.num_inputs_per_output_) {
auto input_slice = (const char*)src_ + base_offsets[1];
value = ThreadReduce((const scalar_t*)input_slice);
}
if (config_.ShouldBlockYReduce()) {
value = BlockYReduce(value, shared_memory);
}
if (config_.ShouldBlockXReduce()) {
value = BlockXReduce(value, shared_memory);
}
auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
arg_t* acc = nullptr;
if (acc_buf_ != nullptr) {
int64_t numerator = (int64_t)sizeof(arg_t);
int64_t denominator = (int64_t)sizeof(out_scalar_t);
ReduceFraction(numerator, denominator);
acc = (arg_t*)((char*)acc_buf_ +
(base_offsets[0] * numerator / denominator));
}
if (config_.ShouldGlobalReduce()) {
value = GlobalReduce(value, acc, shared_memory);
} else if (config_.ShouldStore(output_idx)) {
if (acc == nullptr) {
if (accumulate_) {
value = AccumulateInOutput<can_accumulate_in_output>(out,
value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*out = GetAccumulatedOutput<can_accumulate_in_output>(
out, value);
}
} else {
if (accumulate_) {
value = ops_.Combine(*acc, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*acc = value;
}
}
}
}
OPEN3D_DEVICE arg_t ThreadReduce(const scalar_t* data) const {
index_t idx = config_.InputIdx();
// Multiple accumulators to remove dependency between unrolled loops.
arg_t value_list[vt0];
#pragma unroll
for (int i = 0; i < vt0; i++) {
value_list[i] = identity_;
}
index_t end = config_.num_inputs_per_output_;
index_t stride = config_.step_input_;
index_t element_stride = input_calc_.strides_[0][0] / sizeof(scalar_t);
        // Reduce the layers of function calls so the compiler can properly
        // unroll the loop and expose instruction-level parallelism.
while (idx < config_.num_inputs_per_output_) {
// load input
SmallArray<scalar_t, vt0> values;
if (input_calc_.dims_ == 1) {
StridedIterate<vt0>(
[&](index_t i, index_t idx) {
values[i] = data[idx * element_stride];
},
idx, end, stride);
} else {
StridedIterate<vt0>(
[&](index_t i, index_t idx) {
values[i] = data[input_calc_.get(idx)[0] /
sizeof(scalar_t)];
},
idx, end, stride);
}
// compute
StridedIterate<vt0, index_t>(
[&](index_t i, index_t idx) {
value_list[i] =
ops_.Reduce(value_list[i], values[i], idx);
},
idx, config_.num_inputs_per_output_, config_.step_input_);
// step offset
idx += config_.step_input_ * vt0;
}
#pragma unroll
for (int i = 1; i < vt0; i++) {
value_list[0] = ops_.Combine(value_list[0], value_list[i]);
}
return value_list[0];
}
OPEN3D_DEVICE arg_t BlockXReduce(arg_t value, char* shared_memory) const {
int dim_x = blockDim.x;
arg_t* shared = (arg_t*)shared_memory;
if (dim_x > warpSize) {
int address_base = threadIdx.x + threadIdx.y * blockDim.x;
shared[address_base] = value;
for (int offset = dim_x / 2; offset >= warpSize; offset >>= 1) {
__syncthreads();
if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) {
arg_t other = shared[address_base + offset];
value = ops_.Combine(value, other);
shared[address_base] = value;
}
}
dim_x = warpSize;
}
__syncthreads();
for (int offset = 1; offset < dim_x; offset <<= 1) {
arg_t other = ops_.WarpShflDown(value, offset);
value = ops_.Combine(value, other);
}
return value;
}
OPEN3D_DEVICE arg_t BlockYReduce(arg_t value, char* shared_memory) const {
arg_t* shared = (arg_t*)shared_memory;
shared[config_.SharedMemoryOffset(0)] = value;
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
arg_t other = shared[config_.SharedMemoryOffset(offset)];
value = ops_.Combine(value, other);
shared[config_.SharedMemoryOffset(0)] = value;
}
}
return value;
}
OPEN3D_DEVICE bool MarkBlockFinished() const {
__shared__ bool is_last_block_done_shared;
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
int prev_blocks_finished = atomicAdd(&semaphores_[blockIdx.x], 1);
is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1);
}
__syncthreads();
return is_last_block_done_shared;
}
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<can_acc>::type* = nullptr) const {
return ops_.Combine(*out, value);
}
// This function should never be called --
// it's the version of `AccumulateInOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
out_scalar_t*,
arg_t,
typename std::enable_if<!can_acc>::type* = nullptr) const {
OPEN3D_ASSERT(false);
return arg_t{};
}
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<can_acc>::type* = nullptr) const {
OPEN3D_ASSERT(!final_output_);
return (out_scalar_t)value;
}
// This function should never be called --
// it's the version of `GetAccumulatedOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<!can_acc>::type* = nullptr) const {
OPEN3D_ASSERT(false);
return *out;
}
template <class T>
OPEN3D_DEVICE void SetResults(const T x, const index_t base_offset) const {
auto res = (out_scalar_t*)((char*)dst_ + base_offset);
*res = x;
}
OPEN3D_DEVICE void SetResultsToOutput(arg_t value,
index_t base_offset) const {
OPEN3D_ASSERT(final_output_);
SetResults(ops_.Project(value), base_offset);
}
OPEN3D_DEVICE arg_t GlobalReduce(arg_t value,
arg_t* acc,
char* shared_memory) const {
arg_t* reduce_buffer = (arg_t*)cta_buf_;
index_t output_idx = config_.OutputIdx();
auto base_offsets = output_calc_.get(output_idx);
auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
bool should_store = config_.ShouldStore(config_.OutputIdx());
if (should_store) {
index_t offset = config_.StagingMemoryOffset(blockIdx.y);
reduce_buffer[offset] = value;
}
__threadfence(); // make sure writes are globally visible
__syncthreads(); // if multiple warps in this block wrote to staging,
// make sure they're all done
bool is_last_block_done = MarkBlockFinished();
if (is_last_block_done) {
value = identity_;
if (config_.ShouldBlockXReduce()) {
index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
index_t step = blockDim.x * blockDim.y;
for (; input_offset < config_.ctas_per_output_;
input_offset += step) {
index_t idx = config_.StagingMemoryOffset(input_offset);
arg_t next = reduce_buffer[idx];
value = ops_.Combine(value, next);
}
} else {
index_t input_offset = threadIdx.y;
index_t step = blockDim.y;
for (; input_offset < config_.ctas_per_output_;
input_offset += step) {
index_t idx = config_.StagingMemoryOffset(input_offset);
arg_t next = reduce_buffer[idx];
value = ops_.Combine(value, next);
}
}
value = BlockYReduce(value, shared_memory);
if (config_.ShouldBlockXReduce()) {
value = BlockXReduce(value, shared_memory);
}
if (should_store) {
if (acc == nullptr) {
if (accumulate_) {
value = AccumulateInOutput<can_accumulate_in_output>(
out, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*out = GetAccumulatedOutput<can_accumulate_in_output>(
out, value);
}
} else {
if (accumulate_) {
value = ops_.Combine(*acc, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*acc = value;
}
}
}
}
return value;
}
private:
static constexpr bool can_accumulate_in_output =
std::is_convertible<arg_t, out_scalar_t>::value &&
std::is_convertible<out_scalar_t, arg_t>::value;
static constexpr float acc_buffer_multiplier =
(float)sizeof(arg_t) / sizeof(out_scalar_t);
ops_t ops_;
ReduceConfig config_;
InputCalculator input_calc_;
OutputCalculator output_calc_;
const void* src_;
const char* dst_;
    // acc_buf_ is used for accumulation across sub tensor iterators when
    // accumulation into the output is not permissible
    void* acc_buf_;
    // cta_buf_ is used for accumulation between blocks during global reduction
void* cta_buf_;
int* semaphores_;
arg_t identity_;
bool accumulate_;
bool final_output_;
};
class AccumulationBuffer {
public:
AccumulationBuffer() {}
AccumulationBuffer(int64_t acc_t_size,
int64_t out_t_size,
char* out_ptr,
int64_t size) {
out_ptr_ = (char*)out_ptr;
if (out_t_size >= acc_t_size) {
// reusing output buffer for accumulation.
acc_ptr_ = (char*)out_ptr;
numerator_ = 1;
denominator_ = 1;
} else {
int device_id = CUDAState::GetInstance().GetCurrentDeviceID();
Device device(Device::DeviceType::CUDA, device_id);
buffer_ = std::make_unique<Blob>(size, device);
acc_ptr_ = (char*)buffer_->GetDataPtr();
numerator_ = acc_t_size;
denominator_ = out_t_size;
ReduceFraction(numerator_, denominator_);
}
}
char* GetAccSlice(char* out_ptr) {
if (numerator_ == -1 || acc_ptr_ == nullptr) {
return nullptr;
}
return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_);
}
private:
std::unique_ptr<Blob> buffer_;
char* acc_ptr_ = nullptr;
char* out_ptr_ = nullptr;
float size_factor_ = -1;
int64_t numerator_ = -1;
int64_t denominator_ = -1;
};
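// Note: when the output element is at least as large as the accumulation type,
// the output tensor itself is reused as the accumulation buffer (numerator_ ==
// denominator_ == 1); otherwise a separate Blob of `size` bytes is allocated on
// the current CUDA device and GetAccSlice() maps each output offset to its slot
// in that buffer.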
class CUDAReductionEngine {
public:
CUDAReductionEngine(const CUDAReductionEngine&) = delete;
CUDAReductionEngine& operator=(const CUDAReductionEngine&) = delete;
CUDAReductionEngine(const Indexer& indexer) : indexer_(indexer) {}
template <typename func_t, typename scalar_t>
void Run(const func_t& reduce_func, scalar_t identity) {
if (indexer_.NumWorkloads() == 0) {
utility::LogError(
"0-sized input should be handled outside of the reudction "
"engine.");
}
if (indexer_.NumInputs() != 1) {
utility::LogError("Reduction op must have exactly one input.");
}
OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t);
using arg0_t = typename BinaryFunctionTraits<func_t>::arg0_t;
using arg1_t = typename BinaryFunctionTraits<func_t>::arg1_t;
if (!std::is_same<scalar_t, arg0_t>::value ||
!std::is_same<scalar_t, arg1_t>::value) {
utility::LogError(
"Function input type must match with the identity's type.");
}
using res_t = typename BinaryFunctionTraits<func_t>::res_t;
if (std::is_same<res_t, bool>::value) {
// func_t is a comparison function (for arg-reduction).
// Signature: (scalar_t, scalar_t) -> bool.
RunReduce<scalar_t, int64_t>(
indexer_, WrapArgReduceOps(reduce_func),
thrust::pair<scalar_t, int64_t>(identity, 0));
} else {
// func_t is a regular reduction function.
// Signature: (scalar_t, scalar_t) -> scalar_t.
RunReduce<scalar_t, scalar_t>(
indexer_, WrapRegularReduceOps<scalar_t>(reduce_func),
identity);
}
}
private:
/// If the index cannot be represented in 32 bits, RunReduce calls itself
/// recursively.
template <typename scalar_t,
typename out_scalar_t,
int vt0 = 4,
typename ops_t,
typename ident_t>
static void RunReduce(Indexer& indexer,
const ops_t& ops,
ident_t identity,
AccumulationBuffer* acc_buf_ptr = nullptr) {
using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
using arg_t = typename traits::template arg<0>::type;
static constexpr bool can_accumulate_in_output =
std::is_convertible<arg_t, out_scalar_t>::value;
bool can_use_32bit_indexing = indexer.CanUse32BitIndexing();
std::unique_ptr<AccumulationBuffer> owned_buf_ptr;
        // The acc_buf_ptr is a shared pointer. It is created on the first
        // entry and reused by all recursive calls.
if (acc_buf_ptr == nullptr) {
// acc_buf_ptr holds buffer used for accumulation among multiple
// sub_iter when accumulation in output is not possible.
if (!can_accumulate_in_output && !can_use_32bit_indexing) {
int64_t output_memory_size = 1;
for (int dim = 0; dim < indexer.NumDims(); dim++) {
output_memory_size = std::max(
output_memory_size,
indexer.GetMasterShape()[dim] *
indexer.GetOutput().byte_strides_[dim]);
}
owned_buf_ptr.reset(new AccumulationBuffer(
sizeof(arg_t), sizeof(out_scalar_t),
(char*)indexer.GetOutput().data_ptr_,
output_memory_size * sizeof(arg_t)));
} else {
owned_buf_ptr.reset(new AccumulationBuffer());
}
acc_buf_ptr = owned_buf_ptr.get();
}
if (!can_use_32bit_indexing) {
for (auto& sub_indexer : indexer.SplitTo32BitIndexing()) {
RunReduce<scalar_t, out_scalar_t, vt0>(sub_indexer, ops,
identity, acc_buf_ptr);
}
return;
}
ReduceConfig config(sizeof(arg_t), indexer);
std::unique_ptr<Blob> buffer_blob;
std::unique_ptr<Blob> semaphores_blob;
void* buffer = nullptr;
void* semaphores = nullptr;
if (config.ShouldGlobalReduce()) {
int device_id = CUDAState::GetInstance().GetCurrentDeviceID();
Device device(Device::DeviceType::CUDA, device_id);
buffer_blob =
std::make_unique<Blob>(config.GlobalMemorySize(), device);
semaphores_blob =
std::make_unique<Blob>(config.SemaphoreSize(), device);
buffer = buffer_blob->GetDataPtr();
semaphores = semaphores_blob->GetDataPtr();
OPEN3D_CUDA_CHECK(
cudaMemset(semaphores, 0, config.SemaphoreSize()));
}
OPEN3D_ASSERT(can_use_32bit_indexing);
const char* in_data = (char*)indexer.GetInput(0).data_ptr_;
char* out_data = (char*)indexer.GetOutput().data_ptr_;
char* acc_data = acc_buf_ptr->GetAccSlice(out_data);
auto output_calc = MakeOutputCalculator<uint32_t>(indexer);
auto input_calc = MakeInputCalculator<uint32_t>(indexer);
auto reduce_op = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>(
ops, config, input_calc, output_calc, in_data, out_data,
acc_data, buffer, (int*)semaphores, identity,
indexer.ShouldAccumulate(), indexer.IsFinalOutput());
// Launch reduce kernel
int shared_memory = config.SharedMemorySize();
ReduceKernel<ReduceConfig::MAX_NUM_THREADS>
<<<config.GridDim(), config.BlockDim(), shared_memory,
core::cuda::GetStream()>>>(reduce_op);
cuda::Synchronize();
OPEN3D_CUDA_CHECK(cudaGetLastError());
}
private:
Indexer indexer_;
};
void ReductionCUDA(const Tensor& src,
Tensor& dst,
const SizeVector& dims,
bool keepdim,
ReductionOpCode op_code) {
if (s_regular_reduce_ops.find(op_code) != s_regular_reduce_ops.end()) {
Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
CUDAReductionEngine re(indexer);
Dtype dtype = src.GetDtype();
CUDAScopedDevice scoped_device(src.GetDevice());
DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
switch (op_code) {
case ReductionOpCode::Sum:
if (indexer.NumWorkloads() == 0) {
// 0-sized input can be reduced to non-0-sized outputs,
// where identity elements should be filled.
// E.g. np.sum(np.ones((0, 5)), axis=0).shape == (5,).
dst.Fill(0);
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a + b; },
static_cast<scalar_t>(0));
}
break;
case ReductionOpCode::Prod:
if (indexer.NumWorkloads() == 0) {
dst.Fill(1);
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a * b; },
static_cast<scalar_t>(1));
}
break;
case ReductionOpCode::Min:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport Min.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a < b ? a : b; },
static_cast<scalar_t>(
std::numeric_limits<scalar_t>::max()));
}
break;
case ReductionOpCode::Max:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport Max.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> scalar_t { return a > b ? a : b; },
static_cast<scalar_t>(std::numeric_limits<
scalar_t>::lowest()));
}
break;
default:
utility::LogError("Unsupported op code.");
break;
}
});
} else if (s_arg_reduce_ops.find(op_code) != s_arg_reduce_ops.end()) {
if (dst.GetDtype() != core::Int64) {
utility::LogError("Arg-reduction must have int64 output dtype.");
}
Indexer indexer({src}, dst, DtypePolicy::INPUT_SAME, dims);
CUDAReductionEngine re(indexer);
Dtype dtype = src.GetDtype();
CUDAScopedDevice scoped_device(src.GetDevice());
DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
switch (op_code) {
case ReductionOpCode::ArgMin:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport ArgMin.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> bool { return a < b; },
static_cast<scalar_t>(
std::numeric_limits<scalar_t>::max()));
}
break;
case ReductionOpCode::ArgMax:
if (indexer.NumWorkloads() == 0) {
utility::LogError(
"Zero-size Tensor does not suport ArgMax.");
} else {
re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
-> bool { return a > b; },
static_cast<scalar_t>(std::numeric_limits<
scalar_t>::lowest()));
}
break;
default:
utility::LogError("Unsupported op code.");
break;
}
});
} else if (s_boolean_reduce_ops.find(op_code) !=
s_boolean_reduce_ops.end()) {
if (src.GetDtype() != core::Bool) {
utility::LogError(
"Boolean reduction only supports boolean input tensor.");
}
if (dst.GetDtype() != core::Bool) {
utility::LogError(
"Boolean reduction only supports boolean output tensor.");
}
Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
CUDAReductionEngine re(indexer);
CUDAScopedDevice scoped_device(src.GetDevice());
switch (op_code) {
case ReductionOpCode::All:
if (indexer.NumWorkloads() == 0) {
dst.Fill(true);
} else {
re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
-> uint8_t { return a && b; },
static_cast<uint8_t>(true));
}
break;
case ReductionOpCode::Any:
if (indexer.NumWorkloads() == 0) {
dst.Fill(false);
} else {
re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
-> uint8_t { return a || b; },
static_cast<uint8_t>(false));
}
break;
default:
utility::LogError("Unsupported op code.");
break;
}
} else {
utility::LogError("Unsupported op code.");
}
}
} // namespace kernel
} // namespace core
} // namespace open3d
|
d897582d082f27e8bbd0c73e50001062680faece.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdafx.h"
#include "cuda_tensor.h"
__global__ static void forward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
//output_width = (input_width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_w = ( width - 1) * stride_w + window_w - pad_wl - pad_wr;
//output_height = (input_height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_h = (height - 1) * stride_h + window_h - pad_ht - pad_hb;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
out[dst_i] = -INFINITY;
for (int y = dst_y * stride_h - pad_ht, i = 0; i < window_h; i++,y++) {
if (y >= 0 && y < src_h) {
for (int x = dst_x * stride_w - pad_wl, j = 0; j < window_w; j++, x++) {
if (x >= 0 && x < src_w) {
int src_i = (b * channels + c) * src_size + y * src_w + x;
if (in[src_i] > out[dst_i]) {
out[dst_i] = in[src_i];
indexes[dst_i] = src_i;
}
}
}
}
}
dst_i += threads;
}
}
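// Note on the packed arguments (as decoded by the kernels in this file): the low
// 16 bits of `window` and `stride` hold the width component and the high 16 bits
// the height; `pad` packs four 8-bit fields, from LSB to MSB: pad_hb, pad_ht,
// pad_wr, pad_wl. A hypothetical 2x2 window with stride 2 and no padding would
// therefore be passed as window = (2 << 16) | 2, stride = (2 << 16) | 2, pad = 0.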
bool forward_maxpool(CudaTensor& output, const CudaTensor& input, int* indexes,
int window, int stride, int pad) {
if (input.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(input.Elements());
if (!f16_to_f32(in, reinterpret_cast<__half*>(input.Data()), input.Elements())) {
hipFree(in);
return false;
}
CudaPtr<float> out(output.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( forward_maxpool_kernel_nchw), dim3(g),dim3(b), 0, 0, out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(output.Data()), out, output.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(output.Data());
float* in = reinterpret_cast<float*>(input.Data());
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( forward_maxpool_kernel_nchw), dim3(g), dim3(b), 0, 0, out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
}
return true;
}
__global__ static void backward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
int src_w = (width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_h = (height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
int src_y = (dst_y + pad_ht) / stride_h;
int src_x = (dst_x + pad_wl) / stride_w;
		//TODO: make sure src_x and src_y are inside the matrix
int src_i = (b * channels + c) * src_size + src_y * src_w + src_x;
if (indexes[src_i] == dst_i)
out[dst_i] += in[src_i];
dst_i += threads;
}
}
bool backward_maxpool(CudaTensor& dx, const CudaTensor& dy, int* indexes,
int window, int stride, int pad) {
dx = 0.0f;
if (dx.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(dy.Elements());
		if (!f16_to_f32(in, reinterpret_cast<__half*>(dy.Data()), dy.Elements())) {
hipFree(in);
return false;
}
CudaPtr<float> out(dx.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( backward_maxpool_kernel_nchw), dim3(g),dim3(b), 0, 0, out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(dx.Data()), out, dx.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(dx.Data());
float* in = reinterpret_cast<float*>(dy.Data());
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
hipLaunchKernelGGL(( backward_maxpool_kernel_nchw), dim3(g),dim3(b), 0, 0, out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
hipError_t e = hipDeviceSynchronize();
if (e != hipSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
}
return true;
} | d897582d082f27e8bbd0c73e50001062680faece.cu | #include "stdafx.h"
#include "cuda_tensor.h"
__global__ static void forward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
//output_width = (input_width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_w = ( width - 1) * stride_w + window_w - pad_wl - pad_wr;
//output_height = (input_height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_h = (height - 1) * stride_h + window_h - pad_ht - pad_hb;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
out[dst_i] = -INFINITY;
for (int y = dst_y * stride_h - pad_ht, i = 0; i < window_h; i++,y++) {
if (y >= 0 && y < src_h) {
for (int x = dst_x * stride_w - pad_wl, j = 0; j < window_w; j++, x++) {
if (x >= 0 && x < src_w) {
int src_i = (b * channels + c) * src_size + y * src_w + x;
if (in[src_i] > out[dst_i]) {
out[dst_i] = in[src_i];
indexes[dst_i] = src_i;
}
}
}
}
}
dst_i += threads;
}
}
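// Note on the packed arguments (as decoded by the kernels in this file): the low
// 16 bits of `window` and `stride` hold the width component and the high 16 bits
// the height; `pad` packs four 8-bit fields, from LSB to MSB: pad_hb, pad_ht,
// pad_wr, pad_wl. A hypothetical 2x2 window with stride 2 and no padding would
// therefore be passed as window = (2 << 16) | 2, stride = (2 << 16) | 2, pad = 0.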
bool forward_maxpool(CudaTensor& output, const CudaTensor& input, int* indexes,
int window, int stride, int pad) {
if (input.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(input.Elements());
if (!f16_to_f32(in, reinterpret_cast<__half*>(input.Data()), input.Elements())) {
cudaFree(in);
return false;
}
CudaPtr<float> out(output.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
forward_maxpool_kernel_nchw<<<g,b>>>(out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(output.Data()), out, output.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(output.Data());
float* in = reinterpret_cast<float*>(input.Data());
if (input.DataFormat() == CUDNN_TENSOR_NCHW) {
forward_maxpool_kernel_nchw<<<g, b>>>(out, in, indexes, output.Elements(), output.Channel(),
output.Width(), output.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
}
return true;
}
__global__ static void backward_maxpool_kernel_nchw(float* out, const float* in, int* indexes, int elements,
int channels, int width, int height, int window, int stride, int pad) {
int dst_i = blockIdx.x * blockDim.x + threadIdx.x;
int threads = gridDim.x * blockDim.x;
int dst_size = width * height;
int dst_c_size = channels * dst_size;
int stride_w = stride & 0xffff;
int stride_h = stride >> 16;
int window_w = window & 0xffff;
int window_h = window >> 16;
int pad_hb = pad & 0xff;
int pad_ht = (pad >> 8) & 0xff;
int pad_wr = (pad >> 16) & 0xff;
int pad_wl = pad >> 24;
int src_w = (width + pad_wl + pad_wr - window_w) / stride_w + 1;
int src_h = (height + pad_ht + pad_hb - window_h) / stride_h + 1;
int src_size = src_w * src_h;
while (dst_i < elements) {
int b = dst_i / dst_c_size;
int temp = dst_i % dst_c_size;
int c = temp / dst_size;
temp = temp % dst_size;
int dst_y = temp / width;
int dst_x = temp % width;
int src_y = (dst_y + pad_ht) / stride_h;
int src_x = (dst_x + pad_wl) / stride_w;
//TODO: makesure src_x and src_y is in the matrix
int src_i = (b * channels + c) * src_size + src_y * src_w + src_x;
if (indexes[src_i] == dst_i)
out[dst_i] += in[src_i];
dst_i += threads;
}
}
bool backward_maxpool(CudaTensor& dx, const CudaTensor& dy, int* indexes,
int window, int stride, int pad) {
dx = 0.0f;
if (dx.DataType() == CUDNN_DATA_HALF) {
CudaPtr<float> in(dy.Elements());
		if (!f16_to_f32(in, reinterpret_cast<__half*>(dy.Data()), dy.Elements())) {
cudaFree(in);
return false;
}
CudaPtr<float> out(dx.Elements());
int g = GPUGridSize();
int b = GPUBlockSize();
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
backward_maxpool_kernel_nchw<<<g,b>>>(out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
return f32_to_f16(reinterpret_cast<__half*>(dx.Data()), out, dx.Elements());
}
else {
int g = GPUGridSize();
int b = GPUBlockSize();
float* out = reinterpret_cast<float*>(dx.Data());
float* in = reinterpret_cast<float*>(dy.Data());
if (dy.DataFormat() == CUDNN_TENSOR_NCHW) {
backward_maxpool_kernel_nchw<<<g,b>>>(out, in, indexes, dx.Elements(), dx.Channel(),
dx.Width(), dx.Height(), window, stride, pad);
}
else {
//TODO: finish
return false;
}
cudaError_t e = cudaDeviceSynchronize();
if (e != cudaSuccess) {
cerr << " Error: forward_maxpool failed!\n";
return false;
}
}
return true;
} |
59c0f3b4ec0036367e346adf5dc60977619b1aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// sample m points from n points
// input (n,3)
// output (m,3)
__global__ void farthestpointsamplingKernel(int n,int m, const int random_init, float *temp, const float *dataset, int *idxs){
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
int old_id = random_init;
idxs[0] = old_id;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[j]=1e38;
}
for(int i=1;i<m;i++){
int besti=0;
float best=-1;
float x0 = dataset[old_id*3+0];
float y0 = dataset[old_id*3+1];
float z0 = dataset[old_id*3+2];
for(int j=threadIdx.x;j<n;j+=blockDim.x){
float td=temp[j];
float x1 = dataset[j*3+0];
float y1 = dataset[j*3+1];
float z1 = dataset[j*3+2];
float dist = (x1-x0)*(x1-x0)+(y1-y0)*(y1-y0)+(z1-z0)*(z1-z0);
float d2=min(dist,td);
if (d2!=td)
temp[j]=d2;
if (d2>best){
best=d2;
besti=j;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old_id=dists_i[0];
if (threadIdx.x==0)
idxs[i]=old_id;
}
}
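// The kernel above implements farthest point sampling: starting from random_init,
// `temp` holds each point's squared distance to the nearest already-selected
// point, and a block-wide shared-memory max-reduction (dists/dists_i) picks the
// point farthest from the selected set as the next sample.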
extern "C" void farthestpointsampling(int n, int m, const int random_init, float *temp_host, const float *dataset_host, int *idxs_host){
float *temp, *dataset;
int *idxs;
hipError_t error;
hipMalloc((void**)&dataset, sizeof(float)* n*3);
hipMalloc((void**)&temp, sizeof(float)* n);
hipMalloc((void**)&idxs, sizeof(int)* m);
hipMemcpy(dataset, dataset_host, sizeof(float)* n*3, hipMemcpyHostToDevice);
hipMemcpy(temp, temp_host, sizeof(float)* n, hipMemcpyHostToDevice);
hipMemcpy(idxs, idxs_host, sizeof(int)* m, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(1), dim3(512), 0, 0, n, m, random_init, temp, dataset, idxs);
error = hipDeviceSynchronize();
if(error != hipSuccess){
printf("code: %d, reason: %s\n",error,hipGetErrorString(error));
}
hipMemcpy(idxs_host, idxs, sizeof(int)*m, hipMemcpyDeviceToHost);
hipFree(temp);
hipFree(dataset);
hipFree(idxs);
}
__global__ void fps_multiblocks_Kernel(int num_sample, int num_block, const int *num_pts, const int *start_id, const int *sort_id, const int *seed, float *temp, const float *dataset, int *idxs){
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<num_block;i+=gridDim.x){
if(num_pts[i]==0){
printf("num_pts cannot be zero!");
return;
}
if(num_pts[i]<num_sample ){
for (int j=threadIdx.x;j<num_sample;j+=blockDim.x){
int tid = j;
if(j>=num_pts[i]){
hiprandState_t state;
hiprand_init(0, 0, 0,&state);
tid = hiprand(&state) % num_pts[i];
}
idxs[i*num_sample+j]=sort_id[ start_id[i]+tid ];
}
}
else if(num_pts[i]==num_sample ){
for (int j=threadIdx.x;j<num_sample;j+=blockDim.x)
idxs[i*num_sample+j]=sort_id[ start_id[i]+j ];
}
else{
int old=seed[i];
//printf("old: %d\t", old);
idxs[i*num_sample]=sort_id[old+start_id[i]];
for (int j=threadIdx.x;j<num_pts[i];j+=blockDim.x){
temp[start_id[i] + j]=1e38;
}
for (int j=1;j<num_sample;j++){
int besti=0;
float best=-1;
float x1=dataset[start_id[i]*3 + old*3+0];
float y1=dataset[start_id[i]*3 + old*3+1];
float z1=dataset[start_id[i]*3 + old*3+2];
for (int k=threadIdx.x;k<num_pts[i];k+=blockDim.x){
float td=temp[start_id[i] + k];
float x2,y2,z2;
x2=dataset[start_id[i]*3 + k*3+0];
y2=dataset[start_id[i]*3 + k*3+1];
z2=dataset[start_id[i]*3 + k*3+2];
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[start_id[i] + k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
//if(j<3)
// printf("old2: %d\t", old);
if (threadIdx.x==0)
idxs[i*num_sample+j]=sort_id[old+start_id[i]];
}
}
}
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, hiprandState_t* states) {
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
__global__ void randoms(hiprandState_t* states, int* numbers, int *upbound) {
/* hiprand works like rand - except that it takes a state as a parameter */
numbers[blockIdx.x] = hiprand(&states[blockIdx.x]) % upbound[blockIdx.x];
}
__global__ void print(int *data, int num){
for(int i=0;i<num;i++)
printf("init_seed: %d\t", data[i]);
}
extern "C" void fps_multiblocks(int totalnum_pts, int num_sample, int num_block, const int *num_pts_host, const int *start_id_host, const int *sort_id_host, const float *dataset_host, int *idxs_host){
float *temp, *dataset;
int *num_pts, *start_id, *idxs, *seed, *sort_id;
hipError_t error;
hiprandState_t* states;
hipMalloc((void**)&dataset, sizeof(float)* totalnum_pts*3);
hipMalloc((void**)&temp, sizeof(float)* totalnum_pts);
hipMalloc((void**)&sort_id, sizeof(int)* totalnum_pts);
hipMalloc((void**)&idxs, sizeof(int)* num_sample*num_block);
hipMalloc((void**)&num_pts, sizeof(int)*num_block);
hipMalloc((void**)&start_id, sizeof(int)*num_block);
hipMalloc((void**)&seed, sizeof(int)*num_block);
hipMalloc((void**) &states, num_block * sizeof(hiprandState_t));
hipMemcpy(dataset, dataset_host, sizeof(float)* totalnum_pts*3, hipMemcpyHostToDevice);
hipMemcpy(sort_id, sort_id_host, sizeof(int)* totalnum_pts, hipMemcpyHostToDevice);
hipMemcpy(num_pts, num_pts_host, sizeof(int)* num_block, hipMemcpyHostToDevice);
hipMemcpy(start_id, start_id_host, sizeof(int)* num_block, hipMemcpyHostToDevice);
hipMemcpy(idxs, idxs_host, sizeof(int)* num_sample*num_block, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( init), dim3(num_block), dim3(1), 0, 0, time(0), states);
hipLaunchKernelGGL(( randoms), dim3(num_block), dim3(1), 0, 0, states, seed, num_pts);
hipLaunchKernelGGL(( fps_multiblocks_Kernel), dim3(64), dim3(512), 0, 0, num_sample, num_block, num_pts, start_id, sort_id,seed, temp, dataset, idxs);
//int batchsize=16;
//int num_batch=num_block/batchsize+1;
//for (int i=0;i<num_batch;i++){
// fps_multiblocks_Kernel<<<batchsize, 256>>>(i, num_sample, num_block, num_pts, start_id, sort_id,seed, temp, dataset, idxs);
//}
error = hipDeviceSynchronize();
if(error != hipSuccess){
printf("code: %d, reason: %s\n",error,hipGetErrorString(error));
}
/*
printf("num_block: %d\n", num_block);
print<<<1,1>>>(start_id,num_block);
printf("num_pts: \n");
print<<<1,1>>>(num_pts,num_block);
*/
hipMemcpy(idxs_host, idxs, sizeof(int)*num_sample*num_block, hipMemcpyDeviceToHost);
hipFree(states);
hipFree(temp);
hipFree(dataset);
hipFree(idxs);
hipFree(start_id);
hipFree(num_pts);
hipFree(seed);
hipFree(sort_id);
}
| 59c0f3b4ec0036367e346adf5dc60977619b1aa8.cu | #include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
// sample m points from n points
// input (n,3)
// output (m,3)
__global__ void farthestpointsamplingKernel(int n,int m, const int random_init, float *temp, const float *dataset, int *idxs){
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
int old_id = random_init;
idxs[0] = old_id;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[j]=1e38;
}
for(int i=1;i<m;i++){
int besti=0;
float best=-1;
float x0 = dataset[old_id*3+0];
float y0 = dataset[old_id*3+1];
float z0 = dataset[old_id*3+2];
for(int j=threadIdx.x;j<n;j+=blockDim.x){
float td=temp[j];
float x1 = dataset[j*3+0];
float y1 = dataset[j*3+1];
float z1 = dataset[j*3+2];
float dist = (x1-x0)*(x1-x0)+(y1-y0)*(y1-y0)+(z1-z0)*(z1-z0);
float d2=min(dist,td);
if (d2!=td)
temp[j]=d2;
if (d2>best){
best=d2;
besti=j;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old_id=dists_i[0];
if (threadIdx.x==0)
idxs[i]=old_id;
}
}
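// The kernel above implements farthest point sampling: starting from random_init,
// `temp` holds each point's squared distance to the nearest already-selected
// point, and a block-wide shared-memory max-reduction (dists/dists_i) picks the
// point farthest from the selected set as the next sample.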
extern "C" void farthestpointsampling(int n, int m, const int random_init, float *temp_host, const float *dataset_host, int *idxs_host){
float *temp, *dataset;
int *idxs;
cudaError_t error;
cudaMalloc((void**)&dataset, sizeof(float)* n*3);
cudaMalloc((void**)&temp, sizeof(float)* n);
cudaMalloc((void**)&idxs, sizeof(int)* m);
cudaMemcpy(dataset, dataset_host, sizeof(float)* n*3, cudaMemcpyHostToDevice);
cudaMemcpy(temp, temp_host, sizeof(float)* n, cudaMemcpyHostToDevice);
cudaMemcpy(idxs, idxs_host, sizeof(int)* m, cudaMemcpyHostToDevice);
farthestpointsamplingKernel<<<1, 512>>>(n, m, random_init, temp, dataset, idxs);
error = cudaDeviceSynchronize();
if(error != cudaSuccess){
printf("code: %d, reason: %s\n",error,cudaGetErrorString(error));
}
cudaMemcpy(idxs_host, idxs, sizeof(int)*m, cudaMemcpyDeviceToHost);
cudaFree(temp);
cudaFree(dataset);
cudaFree(idxs);
}
__global__ void fps_multiblocks_Kernel(int num_sample, int num_block, const int *num_pts, const int *start_id, const int *sort_id, const int *seed, float *temp, const float *dataset, int *idxs){
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<num_block;i+=gridDim.x){
if(num_pts[i]==0){
printf("num_pts cannot be zero!");
return;
}
if(num_pts[i]<num_sample ){
for (int j=threadIdx.x;j<num_sample;j+=blockDim.x){
int tid = j;
if(j>=num_pts[i]){
curandState_t state;
curand_init(0, 0, 0,&state);
tid = curand(&state) % num_pts[i];
}
idxs[i*num_sample+j]=sort_id[ start_id[i]+tid ];
}
}
else if(num_pts[i]==num_sample ){
for (int j=threadIdx.x;j<num_sample;j+=blockDim.x)
idxs[i*num_sample+j]=sort_id[ start_id[i]+j ];
}
else{
int old=seed[i];
//printf("old: %d\t", old);
idxs[i*num_sample]=sort_id[old+start_id[i]];
for (int j=threadIdx.x;j<num_pts[i];j+=blockDim.x){
temp[start_id[i] + j]=1e38;
}
for (int j=1;j<num_sample;j++){
int besti=0;
float best=-1;
float x1=dataset[start_id[i]*3 + old*3+0];
float y1=dataset[start_id[i]*3 + old*3+1];
float z1=dataset[start_id[i]*3 + old*3+2];
for (int k=threadIdx.x;k<num_pts[i];k+=blockDim.x){
float td=temp[start_id[i] + k];
float x2,y2,z2;
x2=dataset[start_id[i]*3 + k*3+0];
y2=dataset[start_id[i]*3 + k*3+1];
z2=dataset[start_id[i]*3 + k*3+2];
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[start_id[i] + k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
//if(j<3)
// printf("old2: %d\t", old);
if (threadIdx.x==0)
idxs[i*num_sample+j]=sort_id[old+start_id[i]];
}
}
}
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, curandState_t* states) {
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
__global__ void randoms(curandState_t* states, int* numbers, int *upbound) {
/* curand works like rand - except that it takes a state as a parameter */
numbers[blockIdx.x] = curand(&states[blockIdx.x]) % upbound[blockIdx.x];
}
__global__ void print(int *data, int num){
for(int i=0;i<num;i++)
printf("init_seed: %d\t", data[i]);
}
extern "C" void fps_multiblocks(int totalnum_pts, int num_sample, int num_block, const int *num_pts_host, const int *start_id_host, const int *sort_id_host, const float *dataset_host, int *idxs_host){
float *temp, *dataset;
int *num_pts, *start_id, *idxs, *seed, *sort_id;
cudaError_t error;
curandState_t* states;
cudaMalloc((void**)&dataset, sizeof(float)* totalnum_pts*3);
cudaMalloc((void**)&temp, sizeof(float)* totalnum_pts);
cudaMalloc((void**)&sort_id, sizeof(int)* totalnum_pts);
cudaMalloc((void**)&idxs, sizeof(int)* num_sample*num_block);
cudaMalloc((void**)&num_pts, sizeof(int)*num_block);
cudaMalloc((void**)&start_id, sizeof(int)*num_block);
cudaMalloc((void**)&seed, sizeof(int)*num_block);
cudaMalloc((void**) &states, num_block * sizeof(curandState_t));
cudaMemcpy(dataset, dataset_host, sizeof(float)* totalnum_pts*3, cudaMemcpyHostToDevice);
cudaMemcpy(sort_id, sort_id_host, sizeof(int)* totalnum_pts, cudaMemcpyHostToDevice);
cudaMemcpy(num_pts, num_pts_host, sizeof(int)* num_block, cudaMemcpyHostToDevice);
cudaMemcpy(start_id, start_id_host, sizeof(int)* num_block, cudaMemcpyHostToDevice);
cudaMemcpy(idxs, idxs_host, sizeof(int)* num_sample*num_block, cudaMemcpyHostToDevice);
init<<<num_block, 1>>>(time(0), states);
randoms<<<num_block, 1>>>(states, seed, num_pts);
fps_multiblocks_Kernel<<<64, 512>>>(num_sample, num_block, num_pts, start_id, sort_id,seed, temp, dataset, idxs);
//int batchsize=16;
//int num_batch=num_block/batchsize+1;
//for (int i=0;i<num_batch;i++){
// fps_multiblocks_Kernel<<<batchsize, 256>>>(i, num_sample, num_block, num_pts, start_id, sort_id,seed, temp, dataset, idxs);
//}
error = cudaDeviceSynchronize();
if(error != cudaSuccess){
printf("code: %d, reason: %s\n",error,cudaGetErrorString(error));
}
/*
printf("num_block: %d\n", num_block);
print<<<1,1>>>(start_id,num_block);
printf("num_pts: \n");
print<<<1,1>>>(num_pts,num_block);
*/
cudaMemcpy(idxs_host, idxs, sizeof(int)*num_sample*num_block, cudaMemcpyDeviceToHost);
cudaFree(states);
cudaFree(temp);
cudaFree(dataset);
cudaFree(idxs);
cudaFree(start_id);
cudaFree(num_pts);
cudaFree(seed);
cudaFree(sort_id);
}
|
599465ae7308b6faf88cddf772a75ce10e0d22fc.hip | // !!! This is a file automatically generated by hipify!!!
// Simple Matrix Multiply - Workshop 6
// w6.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chrono>
// add CUDA runtime header file
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
using namespace std::chrono;
const int ntpb = 32; // number of threads per block
// - add your kernel here
__global__ void initialize(float* a, float* b, float* c, int n) {
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
if (row < n && col < n) {
float sum = 0.0f;
for (int i = 0; i < n; i++) {
sum += a[col * n + i] * b[i * n + row];
}
c[col * n + row] = sum;
}
}
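// Note: in this workshop kernel `col` indexes the output row and `row` the output
// column, so each thread writes one element c[col * n + row] of C = A * B; the
// correctness check in main() verifies exactly this row-major layout.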
// check reports error if any
//
void check(const char* msg, const hipError_t err) {
if (err != hipSuccess)
std::cerr << "*** " << msg << ":" << hipGetErrorString(err) << " ***\n";
}
// display matrix M, which is stored in row-major order
//
void display(const char* str, const float* M, int nr, int nc)
{
std::cout << str << std::endl;
std::cout << std::fixed << std::setprecision(4);
for (int i = 0; i < nr; i++) {
for (int j = 0; j < nc; j++)
std::cout << std::setw(10)
<< M[i * nc + j];
std::cout << std::endl;
}
std::cout << std::endl;
}
// report system time
//
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
// matrix multiply
//
void sgemm(float* h_a, float* h_b, float* h_c, int n) {
// - calculate number of blocks for n rows
int blocks = (n + ntpb - 1) / ntpb;
// allocate memory for matrices d_a, d_b, d_c on the device
// - add your allocation code here
float* d_a = nullptr;
float* d_b = nullptr;
float* d_c = nullptr;
hipMalloc((void**)&d_a, n * n * sizeof(float));
hipMalloc((void**)&d_b, n * n * sizeof(float));
hipMalloc((void**)&d_c, n * n * sizeof(float));
// copy h_a and h_b to d_a and d_b (host to device)
// - add your copy code here
hipMemcpy(d_a, h_a, n * n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, n * n * sizeof(float), hipMemcpyHostToDevice);
// launch execution configuration
// - define your 2D grid of blocks
dim3 dGrid(blocks, blocks, 1);
// - define your 2D block of threads
dim3 dBlock(ntpb, ntpb, 1);
// - launch your execution configuration
	initialize << <dGrid, dBlock >> > (d_a, d_b, d_c, n);
	// - check for launch termination
	hipError_t error = hipGetLastError();
	check("Launch Termination", error);
// copy d_c to h_c (device to host)
// - add your copy code here
hipMemcpy(h_c, d_c, n * n * sizeof(float), hipMemcpyDeviceToHost);
// deallocate device memory
// - add your deallocation code here
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// reset the device
hipDeviceReset();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]); // number of rows/columns in h_a, h_b, h_c
steady_clock::time_point ts, te;
// allocate host memory
ts = steady_clock::now();
float* h_a = new float[n * n];
float* h_b = new float[n * n];
float* h_c = new float[n * n];
// populate host matrices a and b
for (int i = 0, kk = 0; i < n; i++)
for (int j = 0; j < n; j++, kk++)
h_a[kk] = h_b[kk] = (float)kk / (n * n);
te = steady_clock::now();
reportTime("allocation and initialization", te - ts);
// h_c = h_a * h_b
ts = steady_clock::now();
sgemm(h_a, h_b, h_c, n);
te = steady_clock::now();
reportTime("matrix-matrix multiplication", te - ts);
// display results
if (n <= 5) {
display("h_a :", h_a, n, n);
display("h_b :", h_b, n, n);
display("h_c = h_a h_b :", h_c, n, n);
}
// check correctness
std::cout << "correctness test ..." << std::endl;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
float sum = 0.0f;
for (int k = 0; k < n; k++)
sum += h_a[i * n + k] * h_b[k * n + j];
if (std::abs(h_c[i * n + j] - sum) > 1.0e-3f)
std::cout << "[" << i << "," << j << "]" << h_c[i * n + j]
<< " != " << sum << std::endl;
}
std::cout << "done" << std::endl;
// deallocate host memory
delete[] h_a;
delete[] h_b;
delete[] h_c;
} | 599465ae7308b6faf88cddf772a75ce10e0d22fc.cu | // Simple Matrix Multiply - Workshop 6
// w6.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chrono>
// add CUDA runtime header file
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
using namespace std::chrono;
const int ntpb = 32; // number of threads per block
// - add your kernel here
__global__ void initialize(float* a, float* b, float* c, int n) {
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
if (row < n && col < n) {
float sum = 0.0f;
for (int i = 0; i < n; i++) {
sum += a[col * n + i] * b[i * n + row];
}
c[col * n + row] = sum;
}
}
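// Note: in this workshop kernel `col` indexes the output row and `row` the output
// column, so each thread writes one element c[col * n + row] of C = A * B; the
// correctness check in main() verifies exactly this row-major layout.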
// check reports error if any
//
void check(const char* msg, const cudaError_t err) {
if (err != cudaSuccess)
std::cerr << "*** " << msg << ":" << cudaGetErrorString(err) << " ***\n";
}
// display matrix M, which is stored in row-major order
//
void display(const char* str, const float* M, int nr, int nc)
{
std::cout << str << std::endl;
std::cout << std::fixed << std::setprecision(4);
for (int i = 0; i < nr; i++) {
for (int j = 0; j < nc; j++)
std::cout << std::setw(10)
<< M[i * nc + j];
std::cout << std::endl;
}
std::cout << std::endl;
}
// report system time
//
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
// matrix multiply
//
void sgemm(float* h_a, float* h_b, float* h_c, int n) {
// - calculate number of blocks for n rows
int blocks = (n + ntpb - 1) / ntpb;
// allocate memory for matrices d_a, d_b, d_c on the device
// - add your allocation code here
float* d_a = nullptr;
float* d_b = nullptr;
float* d_c = nullptr;
cudaMalloc((void**)&d_a, n * n * sizeof(float));
cudaMalloc((void**)&d_b, n * n * sizeof(float));
cudaMalloc((void**)&d_c, n * n * sizeof(float));
// copy h_a and h_b to d_a and d_b (host to device)
// - add your copy code here
cudaMemcpy(d_a, h_a, n * n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, n * n * sizeof(float), cudaMemcpyHostToDevice);
// launch execution configuration
// - define your 2D grid of blocks
dim3 dGrid(blocks, blocks, 1);
// - define your 2D block of threads
dim3 dBlock(ntpb, ntpb, 1);
// - launch your execution configuration
	initialize << <dGrid, dBlock >> > (d_a, d_b, d_c, n);
	// - check for launch termination
	cudaError_t error = cudaGetLastError();
	check("Launch Termination", error);
// copy d_c to h_c (device to host)
// - add your copy code here
cudaMemcpy(h_c, d_c, n * n * sizeof(float), cudaMemcpyDeviceToHost);
// deallocate device memory
// - add your deallocation code here
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// reset the device
cudaDeviceReset();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]); // number of rows/columns in h_a, h_b, h_c
steady_clock::time_point ts, te;
// allocate host memory
ts = steady_clock::now();
float* h_a = new float[n * n];
float* h_b = new float[n * n];
float* h_c = new float[n * n];
// populate host matrices a and b
for (int i = 0, kk = 0; i < n; i++)
for (int j = 0; j < n; j++, kk++)
h_a[kk] = h_b[kk] = (float)kk / (n * n);
te = steady_clock::now();
reportTime("allocation and initialization", te - ts);
// h_c = h_a * h_b
ts = steady_clock::now();
sgemm(h_a, h_b, h_c, n);
te = steady_clock::now();
reportTime("matrix-matrix multiplication", te - ts);
// display results
if (n <= 5) {
display("h_a :", h_a, n, n);
display("h_b :", h_b, n, n);
display("h_c = h_a h_b :", h_c, n, n);
}
// check correctness
std::cout << "correctness test ..." << std::endl;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
float sum = 0.0f;
for (int k = 0; k < n; k++)
sum += h_a[i * n + k] * h_b[k * n + j];
if (std::abs(h_c[i * n + j] - sum) > 1.0e-3f)
std::cout << "[" << i << "," << j << "]" << h_c[i * n + j]
<< " != " << sum << std::endl;
}
std::cout << "done" << std::endl;
// deallocate host memory
delete[] h_a;
delete[] h_b;
delete[] h_c;
} |
cdfb361e7953bbe35e3ea807435f2ac4bcbed994.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26)	// total number of array elements (N)
#define NF_SIZE (1 << 6)	// filter half-width Nf (each output sums index-Nf .. index+Nf)
#define NO_SHARED 0			// kernel-selection flag: do not use shared memory
#define SHARED 1			// kernel-selection flag: use shared memory
#define BLOCK_SIZE (1 << 6)	// number of threads per CUDA thread block
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION (1 << 0)	// number of timing iterations
extern __shared__ int shared_buffer[];
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU_No_Shared;
int *h_SumOfArrayElements_GPU_Shared;
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// For every output element, sum the input values from index - Nf to index + Nf.
// This version does not use shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_No_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
for (int i = -Nf; i <= Nf; i++) {
if (id + i >= N || id + i < 0) continue;
d_SumOfArrayElements[id] += d_ArrayElements[id + i];
}
	// (the running sum could instead be kept in a register and written to global memory once)
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// For every output element, sum the input values from index - Nf to index + Nf.
// This version uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
int i;
/*Todo*/
if (thread_id == 0)
{
for (i = 0; i < Nf; i++)
{
if (id + i < Nf) shared_buffer[i] = 0;
else shared_buffer[i] = d_ArrayElements[id + i - Nf];
}
}
if (thread_id == BLOCK_SIZE-1)
{
for (i = 0; i <= Nf; i++)
{
if (id + i >= N) shared_buffer[thread_id+ i + Nf ] = 0;
else shared_buffer[thread_id + i + Nf] = d_ArrayElements[id + i];
}
}
shared_buffer[thread_id + Nf] = d_ArrayElements[id];
__syncthreads();
for (i = 0; i <= 2*Nf; i++)
{
d_SumOfArrayElements[id] += shared_buffer[thread_id + i];
}
}
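// Shared-memory layout used by the kernel above (BLOCK_SIZE + 2*Nf ints, matching
// the dynamic allocation in Sum_n_elements_GPU): thread 0 fills the Nf-element
// left halo, the last thread fills its own element plus the Nf-element right halo,
// every thread stores its element at offset thread_id + Nf, and out-of-range halo
// slots are zero-filled.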
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// CPU reference implementation in C: for every element, sum the values from
// index - Nf to index + Nf. Used to verify the results of the GPU kernels.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
int i, j, sum;
for (i = 0; i < N; i++) {
sum = 0;
for (j = -Nf; j <= Nf; j++) {
if (i + j >= N || i + j < 0) continue;
sum += p_ArrayElements[i + j];
}
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reads the generated binary file: the first 4 bytes hold N, the next 4 bytes
// hold Nf, followed by N int values in the range -100 ~ 100.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("gen.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_No_Shared = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_Shared = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f, GPU_time_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_No_Shared, Nf, NO_SHARED);
GPU_time_NO_SHARED += device_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_Shared, Nf, SHARED);
GPU_time_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_No_Shared[i] || h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_Shared[i]) {
printf("%d : CPU : %d,\tGPU no shared : %d,\tGPU shared : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU_No_Shared[i], h_SumOfArrayElements_GPU_Shared[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("***GPU SHARED compute time : %.3f ms\n", GPU_time_SHARED / N_ITERATION);
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU_No_Shared);
free(h_SumOfArrayElements_GPU_Shared);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the buffers needed before the kernel launch and selects the device to run on.
// Pass either the NO_SHARED or SHARED macro as Shared_flag;
// the kernel matching the flag value is launched.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag) {
hipError_t cudaStatus;
CUDA_CALL(hipSetDevice(0));
int *d_ArrayElements, *d_SumOfElements;
size_t mem_size;
mem_size = N * sizeof(int);
CUDA_CALL(hipMalloc(&d_ArrayElements, mem_size));
CUDA_CALL(hipMalloc(&d_SumOfElements, mem_size));
CUDA_CALL(hipMemset(d_SumOfElements, 0, mem_size)); // the kernels accumulate with +=, so the output buffer must start zeroed
CUDA_CALL(hipMemcpy(d_ArrayElements, p_ArrayElements, mem_size, hipMemcpyHostToDevice));
dim3 blockDIm(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 gridDim(N / BLOCK_SIZE);
CHECK_TIME_INIT_GPU();
CHECK_TIME_START_GPU();
switch (Shared_flag)
{
case NO_SHARED:
Sum_n_elements_Kernel_No_shared << <gridDim, blockDIm >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
case SHARED:
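		// dynamic shared memory: the block's BLOCK_SIZE elements plus an Nf-wide halo on each side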
Sum_n_elements_Kernel_shared << <gridDim, blockDIm, sizeof(int)*(BLOCK_SIZE+2*Nf) >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
}
CUDA_CALL(cudaStatus = hipDeviceSynchronize());
CHECK_TIME_END_GPU(device_time);
CHECK_TIME_DEST_GPU();
CUDA_CALL(hipMemcpy(p_SumOfElements_GPU, d_SumOfElements, mem_size, hipMemcpyDeviceToHost));
hipFree(d_ArrayElements);
hipFree(d_SumOfElements);
return cudaStatus;
}
| cdfb361e7953bbe35e3ea807435f2ac4bcbed994.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // total data size
#define NF_SIZE (1 << 6) // Nf size
#define NO_SHARED 0 // flag to launch the kernel that does not use shared memory
#define SHARED 1 // flag to launch the kernel that uses shared memory
#define BLOCK_SIZE (1 << 6) // CUDA kernel thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION (1 << 0) // number of experiment iterations
extern __shared__ int shared_buffer[];
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU_No_Shared;
int *h_SumOfArrayElements_GPU_Shared;
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes, for each array index, the sum of the array elements from index - Nf to index + Nf.
// This kernel does not use shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_No_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
for (int i = -Nf; i <= Nf; i++) {
if (id + i >= N || id + i < 0) continue;
d_SumOfArrayElements[id] += d_ArrayElements[id + i];
}
	// Accumulating directly into global memory on purpose: a local "sum" variable would stay in a register and run faster, which would skew the timing comparison.
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes, for each array index, the sum of the array elements from index - Nf to index + Nf.
// This kernel uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
int i;
/*Todo*/
if (thread_id == 0)
{
for (i = 0; i < Nf; i++)
{
if (id + i < Nf) shared_buffer[i] = 0;
else shared_buffer[i] = d_ArrayElements[id + i - Nf];
}
}
if (thread_id == BLOCK_SIZE-1)
{
for (i = 0; i <= Nf; i++)
{
if (id + i >= N) shared_buffer[thread_id+ i + Nf ] = 0;
else shared_buffer[thread_id + i + Nf] = d_ArrayElements[id + i];
}
}
shared_buffer[thread_id + Nf] = d_ArrayElements[id];
__syncthreads();
for (i = 0; i <= 2*Nf; i++)
{
d_SumOfArrayElements[id] += shared_buffer[thread_id + i];
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// C code that computes, for each array index, the sum of the array elements from index - Nf to index + Nf.
// Its output is the reference used to check whether the GPU kernels computed correctly.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
int i, j, sum;
for (i = 0; i < N; i++) {
sum = 0;
for (j = -Nf; j <= Nf; j++) {
if (i + j >= N || i + j < 0) continue;
sum += p_ArrayElements[i + j];
}
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reads the given bin file.
// The first 4 bytes hold the total element count N, the next 4 bytes hold Nf, followed by N int values.
// The data are integers in the range -100 to 100.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("gen.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_No_Shared = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_Shared = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f, GPU_time_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_No_Shared, Nf, NO_SHARED);
GPU_time_NO_SHARED += device_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_Shared, Nf, SHARED);
GPU_time_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_No_Shared[i] || h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_Shared[i]) {
printf("%d : CPU : %d,\tGPU no shared : %d,\tGPU shared : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU_No_Shared[i], h_SumOfArrayElements_GPU_Shared[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("***GPU SHARED compute time : %.3f ms\n", GPU_time_SHARED / N_ITERATION);
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU_No_Shared);
free(h_SumOfArrayElements_GPU_Shared);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the buffers needed before the kernel launch and selects the device to run on.
// Pass either the NO_SHARED or SHARED macro as Shared_flag;
// the kernel matching the flag value is launched.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag) {
cudaError_t cudaStatus;
CUDA_CALL(cudaSetDevice(0));
int *d_ArrayElements, *d_SumOfElements;
size_t mem_size;
mem_size = N * sizeof(int);
CUDA_CALL(cudaMalloc(&d_ArrayElements, mem_size));
CUDA_CALL(cudaMalloc(&d_SumOfElements, mem_size));
CUDA_CALL(cudaMemset(d_SumOfElements, 0, mem_size)); // the kernels accumulate with +=, so the output buffer must start zeroed
CUDA_CALL(cudaMemcpy(d_ArrayElements, p_ArrayElements, mem_size, cudaMemcpyHostToDevice));
dim3 blockDIm(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 gridDim(N / BLOCK_SIZE);
CHECK_TIME_INIT_GPU();
CHECK_TIME_START_GPU();
switch (Shared_flag)
{
case NO_SHARED:
Sum_n_elements_Kernel_No_shared << <gridDim, blockDIm >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
case SHARED:
Sum_n_elements_Kernel_shared << <gridDim, blockDIm, sizeof(int)*(BLOCK_SIZE+2*Nf) >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
}
CUDA_CALL(cudaStatus = cudaDeviceSynchronize());
CHECK_TIME_END_GPU(device_time);
CHECK_TIME_DEST_GPU();
CUDA_CALL(cudaMemcpy(p_SumOfElements_GPU, d_SumOfElements, mem_size, cudaMemcpyDeviceToHost));
cudaFree(d_ArrayElements);
cudaFree(d_SumOfElements);
return cudaStatus;
}
|
18ab9005eab5176e13ef07c642616742841ff8d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* This does nothing, and just exists to simplify the code generation.
*/
inline __device__ real3 trim(real3 v) {
return v;
}
/**
* Compute the difference between two vectors, optionally taking periodic boundary conditions into account
* and setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(result)
#endif
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
inline __device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 result = cross(vec1, vec2);
return make_real4(result.x, result.y, result.z, result.x*result.x + result.y*result.y + result.z*result.z);
}
/**
* Compute forces on donors.
*/
extern "C" __global__ void computeDonorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
mixed energy = 0;
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x*gridDim.x) {
// Load information about the donor this thread will compute forces on.
int donorIndex = donorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 d1, d2, d3;
if (donorIndex < NUM_DONORS) {
atoms = donorAtoms[donorIndex];
d1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
d2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
d3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[donorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x) {
// Load the next block of acceptors into local memory.
__syncthreads();
int blockSize = min((int) blockDim.x, NUM_ACCEPTORS-acceptorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = acceptorAtoms[acceptorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (donorIndex < NUM_DONORS) {
for (int index = 0; index < blockSize; index++) {
int acceptorIndex = acceptorStart+index;
#ifdef USE_EXCLUSIONS
if (acceptorIndex == exclusionIndices.x || acceptorIndex == exclusionIndices.y || acceptorIndex == exclusionIndices.z || acceptorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 a1 = posBuffer[3*index];
real4 a2 = posBuffer[3*index+1];
real4 a3 = posBuffer[3*index+2];
real4 deltaD1A1 = delta(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_DONOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
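        // Forces are accumulated as 64-bit fixed-point values (scaled by 2^32) so atomic accumulation stays order-independent and deterministic.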
if (donorIndex < NUM_DONORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Compute forces on acceptors.
*/
extern "C" __global__ void computeAcceptorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x*gridDim.x) {
// Load information about the acceptor this thread will compute forces on.
int acceptorIndex = acceptorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 a1, a2, a3;
if (acceptorIndex < NUM_ACCEPTORS) {
atoms = acceptorAtoms[acceptorIndex];
a1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
a2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
a3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[acceptorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x) {
// Load the next block of donors into local memory.
__syncthreads();
int blockSize = min((int) blockDim.x, NUM_DONORS-donorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = donorAtoms[donorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (acceptorIndex < NUM_ACCEPTORS) {
for (int index = 0; index < blockSize; index++) {
int donorIndex = donorStart+index;
#ifdef USE_EXCLUSIONS
if (donorIndex == exclusionIndices.x || donorIndex == exclusionIndices.y || donorIndex == exclusionIndices.z || donorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 d1 = posBuffer[3*index];
real4 d2 = posBuffer[3*index+1];
real4 d3 = posBuffer[3*index+2];
real4 deltaD1A1 = delta(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_ACCEPTOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (acceptorIndex < NUM_ACCEPTORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
}
| 18ab9005eab5176e13ef07c642616742841ff8d2.cu | /**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* This does nothing, and just exists to simplify the code generation.
*/
inline __device__ real3 trim(real3 v) {
return v;
}
/**
* Compute the difference between two vectors, optionally taking periodic boundary conditions into account
* and setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(result)
#endif
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
inline __device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 result = cross(vec1, vec2);
return make_real4(result.x, result.y, result.z, result.x*result.x + result.y*result.y + result.z*result.z);
}
/**
* Compute forces on donors.
*/
extern "C" __global__ void computeDonorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
mixed energy = 0;
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x*gridDim.x) {
// Load information about the donor this thread will compute forces on.
int donorIndex = donorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 d1, d2, d3;
if (donorIndex < NUM_DONORS) {
atoms = donorAtoms[donorIndex];
d1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
d2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
d3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[donorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x) {
// Load the next block of acceptors into local memory.
__syncthreads();
int blockSize = min((int) blockDim.x, NUM_ACCEPTORS-acceptorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = acceptorAtoms[acceptorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (donorIndex < NUM_DONORS) {
for (int index = 0; index < blockSize; index++) {
int acceptorIndex = acceptorStart+index;
#ifdef USE_EXCLUSIONS
if (acceptorIndex == exclusionIndices.x || acceptorIndex == exclusionIndices.y || acceptorIndex == exclusionIndices.z || acceptorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 a1 = posBuffer[3*index];
real4 a2 = posBuffer[3*index+1];
real4 a3 = posBuffer[3*index+2];
real4 deltaD1A1 = delta(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_DONOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (donorIndex < NUM_DONORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Compute forces on acceptors.
*/
extern "C" __global__ void computeAcceptorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x*gridDim.x) {
// Load information about the acceptor this thread will compute forces on.
int acceptorIndex = acceptorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 a1, a2, a3;
if (acceptorIndex < NUM_ACCEPTORS) {
atoms = acceptorAtoms[acceptorIndex];
a1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
a2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
a3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[acceptorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x) {
// Load the next block of donors into local memory.
__syncthreads();
int blockSize = min((int) blockDim.x, NUM_DONORS-donorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = donorAtoms[donorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (acceptorIndex < NUM_ACCEPTORS) {
for (int index = 0; index < blockSize; index++) {
int donorIndex = donorStart+index;
#ifdef USE_EXCLUSIONS
if (donorIndex == exclusionIndices.x || donorIndex == exclusionIndices.y || donorIndex == exclusionIndices.z || donorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 d1 = posBuffer[3*index];
real4 d2 = posBuffer[3*index+1];
real4 d3 = posBuffer[3*index+2];
real4 deltaD1A1 = delta(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_ACCEPTOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (acceptorIndex < NUM_ACCEPTORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
}
|
f35613d4feff198156c26c3fa34659bf82e226c6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "cudnn_util.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
extern __shared__ float arr_sh[];
__global__ void sparse_fully_connected_kernel(
float * __restrict output_neurons,
const float * __restrict input_neurons,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int input_feature_map_block_size,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * input_feature_map_block_size;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int it_count = min(input_feature_map_block_size, end_column_index - base_nnz_index);
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile int * column_indices_sh = (int *)arr_sh;
if (lane_id < it_count)
column_indices_sh[thread_id] = column_indices[base_nnz_index + lane_id];
int window_it_count = (window_size + 31) >> 5;
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
{
valid[i] = (i < (entry_count - base_entry_id));
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
for(int i = 0; i < it_count; ++i)
{
int index = base_nnz_index + i;
int column_id = column_indices_sh[warp_id * 32 + i];
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++k)
{
float inp = __load_nc(input_neurons + entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id);
sums[k] += w * inp;
}
}
local_weight_id += 32;
}
}
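			// Butterfly reduction with XOR shuffles: after the loop every lane holds the warp-wide sum; lane 0 writes it out below.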
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
}
if (lane_id == 0)
{
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(output_neurons + (base_entry_id + i) * output_elem_count_per_entry + row_id, sums[i]);
}
}
const int sparse_fully_connected_layer_tester_cuda::max_input_feature_map_block_size = 32;
sparse_fully_connected_layer_tester_cuda::sparse_fully_connected_layer_tester_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_layer_tester_cuda::~sparse_fully_connected_layer_tester_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
cuda_util::set_with_value(
*cuda_config,
*output_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> input_feature_map_block_size_and_count = get_input_feature_map_block_size_and_count();
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * input_feature_map_block_size_and_count.second,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
hipLaunchKernelGGL(( sparse_fully_connected_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[0],
entry_count,
input_feature_map_block_size_and_count.first,
window_size);
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_buffer));
}
}
void sparse_fully_connected_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
window_size = 1;
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_size *= *it;
cudnn_util::set_tensor_bias_descriptor(
bias_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
}
void sparse_fully_connected_layer_tester_cuda::notify_data_custom(layer_data_custom::const_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
std::pair<int, int> sparse_fully_connected_layer_tester_cuda::get_input_feature_map_block_size_and_count() const
{
int candidate_block_size = max_column_index_count_per_row;
if (candidate_block_size <= max_input_feature_map_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_input_feature_map_block_size - 1) / max_input_feature_map_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
| f35613d4feff198156c26c3fa34659bf82e226c6.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "cudnn_util.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
extern __shared__ float arr_sh[];
__global__ void sparse_fully_connected_kernel(
float * __restrict output_neurons,
const float * __restrict input_neurons,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int input_feature_map_block_size,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * input_feature_map_block_size;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int it_count = min(input_feature_map_block_size, end_column_index - base_nnz_index);
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile int * column_indices_sh = (int *)arr_sh;
if (lane_id < it_count)
column_indices_sh[thread_id] = column_indices[base_nnz_index + lane_id];
int window_it_count = (window_size + 31) >> 5;
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
{
valid[i] = (i < (entry_count - base_entry_id));
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
for(int i = 0; i < it_count; ++i)
{
int index = base_nnz_index + i;
int column_id = column_indices_sh[warp_id * 32 + i];
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++k)
{
float inp = __load_nc(input_neurons + entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id);
sums[k] += w * inp;
}
}
local_weight_id += 32;
}
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
}
if (lane_id == 0)
{
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(output_neurons + (base_entry_id + i) * output_elem_count_per_entry + row_id, sums[i]);
}
}
const int sparse_fully_connected_layer_tester_cuda::max_input_feature_map_block_size = 32;
sparse_fully_connected_layer_tester_cuda::sparse_fully_connected_layer_tester_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_layer_tester_cuda::~sparse_fully_connected_layer_tester_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
cuda_util::set_with_value(
*cuda_config,
*output_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> input_feature_map_block_size_and_count = get_input_feature_map_block_size_and_count();
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * input_feature_map_block_size_and_count.second,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
sparse_fully_connected_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*output_buffer,
*input_buffers[0],
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[0],
entry_count,
input_feature_map_block_size_and_count.first,
window_size);
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_buffer));
}
}
void sparse_fully_connected_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
window_size = 1;
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_size *= *it;
cudnn_util::set_tensor_bias_descriptor(
bias_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
}
void sparse_fully_connected_layer_tester_cuda::notify_data_custom(layer_data_custom::const_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
std::pair<int, int> sparse_fully_connected_layer_tester_cuda::get_input_feature_map_block_size_and_count() const
{
int candidate_block_size = max_column_index_count_per_row;
if (candidate_block_size <= max_input_feature_map_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_input_feature_map_block_size - 1) / max_input_feature_map_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
|
fa34a29004c9362f88d302f6f00f869714310e9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// convert_bottom_{gpu, cpu}
// --------------------------------------------------------------------------
// convert bottom3d (C x H x W)
// -> bottom5d (C x kernel_h x kernel_w x H5 x W5)
// given (c, h5, w5),
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = (-pad_h + stride_h * h5) + kh, kh = { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w5) + kw, kw = { 0, 1, ..., kernel_w - 1 }
// if !(0 <= h < H) or !(0 <= w < W), assign 0
#ifdef GPU
__global__
void convert_bottom_gpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
// thread index: (c, h5, w5) = c*H5*W5 + h5*W5 + w5
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int H5W5 = H5 * W5;
if (index < C * H5W5) {
// parse thread index -> (c, h5, w5)
const int c = index / H5W5;
const int h5 = (index / W5) % H5;
const int w5 = index % W5;
// p_bottom5d initially points to bottom5d[c][kh = 0][kw = 0][h5][w5]
real* const p_bottom5d = bottom5d + index +
(c * H5W5) * (kernel_h * kernel_w - 1);
// (h_start, w_start): upper-left corner of bottom3d's kernel patch
const int h_start = h5 * stride_h - pad_h;
const int w_start = w5 * stride_w - pad_w;
const real* const p_bottom3d = bottom3d +
(c * H + h_start) * W + w_start;
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = h_start + kh, kh = {0, 1, ..., kernel_h - 1}
// w = w_start + kw, kw = {0, 1, ..., kernel_w - 1}
// if (h, w) is in a zero-padded region, assign 0
// Original code
#ifdef PASS
{
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h = h_start + kh;
const int w = w_start + kw;
p_bottom5d[(kh * kernel_w + kw) * H5W5] =
(h >= 0 && h < H && w >= 0 && w < W) ? p_bottom3d[kh * W + kw]
: 0;
}
}
}
#endif
// Ternary operations removed
{
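        // Clip the kernel patch to the valid input range [0, H) x [0, W);
        // rows and columns of the patch that fall outside it are written as explicit zeros below.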
const int kh_start = MAX(0, -h_start);
const int kw_start = MAX(0, -w_start);
const int kh_end = MIN(H - h_start, kernel_h);
const int kw_end = MIN(W - w_start, kernel_w);
for (int kh = 0; kh < kh_start; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
}
for (int kh = kh_start; kh < kh_end; ++kh) {
for (int kw = 0; kw < kw_start; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
for (int kw = kw_start; kw < kw_end; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = p_bottom3d[kh * W + kw];
}
for (int kw = kw_end; kw < kernel_w; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
}
for (int kh = kh_end; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
}
}
}
}
#else
void convert_bottom_cpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
for (int c = 0; c < C; ++c) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
// pointer to bottom5d[c][kh][kw][h5 = 0][w5 = 0]
real* const p_bottom5d = bottom5d +
((c * kernel_h + kh) * kernel_w + kw) * H5 * W5;
int h = -pad_h + kh;
int h5 = 0;
// for h < 0 (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h < 0; h += stride_h, ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for 0 <= h < H (data region)
for (; h < H && h5 < H5; h += stride_h, ++h5) {
// pointer to bottom3d[c][h][w = 0]
int w = -pad_w + kw;
int w5 = 0;
// for w < 0 (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w < 0; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
// for 0 <= w < W (data region):
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
for (; w < W && w5 < W5; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = bottom3d[(c * H + h) * W + w];
}
// for w >= W (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for h >= H (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h5 < H5; ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
} // endfor kw
} // endfor kh
} // endfor c
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// conv_forward
// --------------------------------------------------------------------------
// convolution: bottom -> top
// G: number of groups
// bottom: (G * C) x H x W
// top: (G * C') x H' x W'
// weight: G x C' x C x kernel_h x kernel_w
// bias: (G * C') x 1
// temp: (G * C * kernel_h * kernel_w) x (H' * W') array
// const: 1 x (H' * W') array, const[i] = 1 for all i
void conv_forward(const Tensor* const bottom3d,
Tensor* const top3d,
const Tensor* const weight5d,
const Tensor* const bias1d,
real* const temp_data,
const real* const const_data,
const LayerOption* const option)
{
// weight shape: G x C' x C x kernel_h x kernel_w
const int num_groups = weight5d->shape[0][0]; // G
const int top_C = weight5d->shape[0][1]; // C'
const int bottom_C = weight5d->shape[0][2]; // C
const int kernel_h = weight5d->shape[0][3];
const int kernel_w = weight5d->shape[0][4];
// padding size & stride size
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
real* p_temp_data = temp_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// convert bottom shape
// (G * C) x H x W -> (G * C * kernel_h * kernel_w) x (H' * W')
{
#ifdef GPU
// one thread computes "kernel_h * kernel_w" entries in top
const int num_threads = num_groups * bottom_C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( convert_bottom_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#else
convert_bottom_cpu(
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#endif
}
// compute top[g] = dot(weight[g], bottom[g])
// weight[g]: C' x (C * kernel_h * kernel_w)
// bottom[g]: (C * kernel_h * kernel_w) x (H' * W')
// top[g]: C' x H' x W'
for (int g = 0; g < num_groups; ++g) {
const int kernel_size = bottom_C * kernel_h * kernel_w;
const int top_area = top_H * top_W;
const real* const p_temp_g = p_temp_data +
g * kernel_size * top_area;
const real* const p_weight_g = weight5d->data +
g * top_C * kernel_size;
real* const p_top_g = p_top_item + g * top_C * top_area;
// compute Z = alpha * dot(X, Y) + beta * Z
// X (= weight): m x p, Y (= bottom): p x n, Z (= top): m x n
// X, Y, Z: row-major order (e.g., Z[i][j] = Z[i * n + j])
#ifdef GPU
// input arguments:
// cublas handle,
// do_transpose_Y (= false), do_transpose_X (= false),
// n (= H' * W'), m (= C'), p (= C * kernel_h * kernel_w),
// &alpha (= 1),
// &Y, number of columns in Y (= n),
// &X, number of columns in X (= p),
// &beta (= 0),
// &Z, number of columns in Z (= n)
const real one = 1.0f, zero = 0.0f;
hipblasSgemm(*((hipblasHandle_t*)option->handle),
HIPBLAS_OP_N, HIPBLAS_OP_N,
top_area, top_C, kernel_size,
&one,
p_temp_g, top_area,
p_weight_g, kernel_size,
&zero,
p_top_g, top_area);
#else
// input arguments:
// is_row_major_order (= true),
// do_transpose_X (= false), do_transpose_Y (= false),
// m (= C'), n (= H' * W'), p (= C * kernel_h * kernel_w),
// alpha (= 1),
// &X, number of columns in X (= p),
// &Y, number of columns in Y (= n),
// beta (= 0),
// &Z, number of columns in Z (= n)
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_C, top_area, kernel_size,
1.0f,
p_weight_g, kernel_size,
p_temp_g, top_area,
0.0f,
p_top_g, top_area);
#endif
}
// compute top[i][j] = top[i][j] + bias[i]
// top: (G * C') x (H' * W')
// bias: (G * C') x 1
if (option->bias) {
const int top_channels = num_groups * top_C;
const int top_area = top_H * top_W;
// the computation is equivalent to...
// top = top + dot(bias, constant)
// constant: 1 x (H' * W'), constant[i] = 1 for all i
#ifdef GPU
// thus, input arguments:
// do_transpose_Y (= false), do_transpose_X (= false),
// n = H' * W', m = G * C', p = 1
// alpha = 1, beta = 1
const real one = 1.0f;
hipblasSgemm(*((hipblasHandle_t*)option->handle),
HIPBLAS_OP_N, HIPBLAS_OP_N,
top_area, top_channels, 1,
&one,
const_data, top_area,
bias1d->data, 1,
&one,
p_top_item, top_area);
#else
// input arguments:
// do_transpose_X (= false), do_transpose_Y (= false),
// m = G * C', n = H' * W', p = 1
// alpha = 1, beta = 1
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_channels, top_area, 1,
1.0f,
bias1d->data, 1,
const_data, top_area,
1.0f,
p_top_item, top_area);
#endif
}
// locate next item
{
const int bottom_size = num_groups * bottom_C * bottom_H * bottom_W;
const int top_size = num_groups * top_C * top_H * top_W;
//const int temp_size =
// num_groups * bottom_C * kernel_h * kernel_w * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
//p_temp_data += temp_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void conv_shape(const Tensor* const bottom3d,
Tensor* const top3d,
Tensor* const weight5d,
Tensor* const bias1d,
int* const temp_size,
int* const const_size,
const LayerOption* const option)
{
const int num_groups = option->num_groups; // G
const int top_C = option->out_channels / option->num_groups; // C'
const int bottom_C = bottom3d->shape[0][0] / option->num_groups; // C
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0, total_top_area = 0, max_top_area = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
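    // e.g., with illustrative values H = 14, pad_h = 1, kernel_h = 3, stride_h = 1:
    //   H' = 1 + (14 + 2*1 - 3) / 1 = 14 (output keeps the input height)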
const int top_area = top_H * top_W;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += num_groups * top_C * top_H * top_W;
// sum(H' * W') & max(H' * W') in the batch
total_top_area += top_area;
max_top_area = MAX(max_top_area, top_area);
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// weight shape: G x C' x C x kernel_h x kernel_w
weight5d->num_items = 1;
weight5d->ndim = 5;
weight5d->shape[0][0] = num_groups;
weight5d->shape[0][1] = top_C;
weight5d->shape[0][2] = bottom_C;
weight5d->shape[0][3] = kernel_h;
weight5d->shape[0][4] = kernel_w;
weight5d->start[0] = 0;
// bias shape: (G * C') x 1
if (option->bias) {
bias1d->num_items = 1;
bias1d->ndim = 1;
bias1d->shape[0][0] = num_groups * top_C;
bias1d->start[0] = 0;
}
else if (bias1d) {
bias1d->num_items = 0;
bias1d->ndim = 0;
bias1d->shape[0][0] = 0;
bias1d->start[0] = 0;
}
// temporary data size: G * C * kernel_h * kernel_w * sum(H' * W')
*temp_size = num_groups * bottom_C * kernel_h * kernel_w * max_top_area;
// constant data size: max(H' * W')
*const_size = max_top_area;
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_forward(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
net->temp_data, net->const_data, &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
int temp_size, const_size;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_shape(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
&temp_size, &const_size, &layer->option);
update_net_size(net, layer, temp_size, 0, const_size);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y, W, b;
real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
real *W_data = NULL, *b_data = NULL;
real *p_temp_data = NULL, *const_data = NULL, *p_const_data = NULL;
LayerOption option;
int temp_size, const_size;
// set option
{
option.num_groups = 1;
option.out_channels = 512;
option.kernel_h = 1;
option.kernel_w = 1;
option.pad_h = 0;
option.pad_w = 0;
option.stride_h = 1;
option.stride_w = 1;
option.bias = 1;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/conv_bottom0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
conv_shape(&X, &Y, &W, &b, &temp_size, &const_size, &option);
Y_true_data = load_data("../data/temp/conv_top0.bin",
&ndim, shape, NULL);
Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
W_data = load_data("../data/temp/conv_param0.bin",
&ndim, shape, NULL);
if (option.bias) {
b_data = load_data("../data/temp/conv_param1.bin",
&ndim, shape, NULL);
const_data = (real*)malloc(const_size * sizeof(real));
for (int i = 0; i < const_size; ++i) {
const_data[i] = 1;
}
}
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
hipSetDevice(0);
option.handle = (hipblasHandle_t*)malloc(sizeof(hipblasHandle_t));
if (hipblasCreate((hipblasHandle_t*)option.handle)
!= HIPBLAS_STATUS_SUCCESS) {
printf("cublas creation failed\n");
}
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const int X_size = flatten_size(&X);
const int Y_size = flatten_size(&Y);
const int W_size = flatten_size(&W);
const int b_size = flatten_size(&b);
printf("gpu malloc\n");
hipMalloc(&X.data, X_size * sizeof(real));
hipMalloc(&Y.data, Y_size * sizeof(real));
hipMalloc(&W.data, W_size * sizeof(real));
hipMalloc(&p_temp_data, temp_size * sizeof(real));
if (option.bias) {
hipMalloc(&b.data, b_size * sizeof(real));
hipMalloc(&p_const_data, const_size * sizeof(real));
}
else {
b.data = NULL;
}
printf("memcpy: cpu -> gpu\n");
hipMemcpyAsync(X.data, X_data, X_size * sizeof(real),
hipMemcpyHostToDevice);
hipMemcpyAsync(W.data, W_data, W_size * sizeof(real),
hipMemcpyHostToDevice);
if (option.bias) {
hipMemcpyAsync(b.data, b_data, b_size * sizeof(real),
hipMemcpyHostToDevice);
hipMemcpyAsync(p_const_data, const_data, const_size * sizeof(real),
hipMemcpyHostToDevice);
}
}
#else
{
X.data = X_data;
Y.data = Y_data;
W.data = W_data;
p_temp_data = (real*)malloc(temp_size * sizeof(real));
if (option.bias) {
b.data = b_data;
p_const_data = const_data;
}
else {
b.data = NULL;
}
}
#endif
// do forward operation
{
printf("do forward\n");
conv_forward(&X, &Y, &W, &b, p_temp_data, p_const_data, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const int Y_size = flatten_size(&Y);
printf("memcpy: cpu <- gpu\n");
hipMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
hipMemcpyDeviceToHost);
}
#endif
// verify results
{
int i = 0;
printf("verification\n");
for (int n = 0; n < Y.num_items; ++n) {
for (int c = 0; c < Y.shape[n][0]; ++c) {
for (int h = 0; h < Y.shape[n][1]; ++h) {
for (int w = 0; w < Y.shape[n][2]; ++w) {
real diff = ABS(Y_data[i] - Y_true_data[i]);
diff /= 1e-10f + MIN(ABS(Y_data[i]), ABS(Y_true_data[i]));
#ifdef GPU
if (diff > 0) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#else
if (diff > 1e-3f) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#endif
++i;
} // endfor w
} // endfor h
} // endfor c
} // endfor n
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(Y_data);
free(Y_true_data);
free(W_data);
if (option.bias) {
free(b_data);
free(const_data);
}
}
#ifdef GPU
{
printf("gpu free\n");
hipFree(X.data);
hipFree(Y.data);
hipFree(W.data);
hipFree(p_temp_data);
if (option.bias) {
hipFree(b.data);
hipFree(p_const_data);
}
if (hipblasDestroy(*((hipblasHandle_t*)option.handle))
!= HIPBLAS_STATUS_SUCCESS) {
printf("cublas destruction failed\n");
}
free(option.handle);
}
#else
{
free(p_temp_data);
}
#endif
return 0;
}
#endif // endifdef TEST
| fa34a29004c9362f88d302f6f00f869714310e9f.cu | #include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// convert_bottom_{gpu, cpu}
// --------------------------------------------------------------------------
// convert bottom3d (C x H x W)
// -> bottom5d (C x kernel_h x kernel_w x H5 x W5)
// given (c, h5, w5),
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = (-pad_h + stride_h * h5) + kh, kh = { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w5) + kw, kw = { 0, 1, ..., kernel_w - 1 }
// if !(0 <= h < H) or !(0 <= w < W), assign 0
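// this is the usual im2col-style lowering; e.g., with kernel_h = kernel_w = 3,
// pad = 1, stride = 1, output position (h5, w5) = (0, 0) gathers input rows and
// columns -1..1, and the out-of-range entries are written as 0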
#ifdef GPU
__global__
void convert_bottom_gpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
// thread index: (c, h5, w5) = c*H5*W5 + h5*W5 + w5
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int H5W5 = H5 * W5;
if (index < C * H5W5) {
// parse thread index -> (c, h5, w5)
const int c = index / H5W5;
const int h5 = (index / W5) % H5;
const int w5 = index % W5;
// p_bottom5d initially points to bottom5d[c][kh = 0][kw = 0][h5][w5]
real* const p_bottom5d = bottom5d + index +
(c * H5W5) * (kernel_h * kernel_w - 1);
// (h_start, w_start): upper-left corner of bottom3d's kernel patch
const int h_start = h5 * stride_h - pad_h;
const int w_start = w5 * stride_w - pad_w;
const real* const p_bottom3d = bottom3d +
(c * H + h_start) * W + w_start;
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = h_start + kh, kh = {0, 1, ..., kernel_h - 1}
// w = w_start + kw, kw = {0, 1, ..., kernel_w - 1}
// if (h, w) is in a zero-padded region, assign 0
// Original code
#ifdef PASS
{
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h = h_start + kh;
const int w = w_start + kw;
p_bottom5d[(kh * kernel_w + kw) * H5W5] =
(h >= 0 && h < H && w >= 0 && w < W) ? p_bottom3d[kh * W + kw]
: 0;
}
}
}
#endif
// Ternary operations removed
{
const int kh_start = MAX(0, -h_start);
const int kw_start = MAX(0, -w_start);
const int kh_end = MIN(H - h_start, kernel_h);
const int kw_end = MIN(W - w_start, kernel_w);
for (int kh = 0; kh < kh_start; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
}
for (int kh = kh_start; kh < kh_end; ++kh) {
for (int kw = 0; kw < kw_start; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
for (int kw = kw_start; kw < kw_end; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = p_bottom3d[kh * W + kw];
}
for (int kw = kw_end; kw < kernel_w; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
}
for (int kh = kh_end; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
p_bottom5d[(kh * kernel_w + kw) * H5W5] = 0;
}
}
}
}
}
#else
void convert_bottom_cpu(const real* const bottom3d,
real* const bottom5d,
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
for (int c = 0; c < C; ++c) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
// pointer to bottom5d[c][kh][kw][h5 = 0][w5 = 0]
real* const p_bottom5d = bottom5d +
((c * kernel_h + kh) * kernel_w + kw) * H5 * W5;
int h = -pad_h + kh;
int h5 = 0;
// for h < 0 (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h < 0; h += stride_h, ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for 0 <= h < H (data region)
for (; h < H && h5 < H5; h += stride_h, ++h5) {
// pointer to bottom3d[c][h][w = 0]
int w = -pad_w + kw;
int w5 = 0;
// for w < 0 (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w < 0; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
// for 0 <= w < W (data region):
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
for (; w < W && w5 < W5; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = bottom3d[(c * H + h) * W + w];
}
// for w >= W (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for h >= H (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h5 < H5; ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
} // endfor kw
} // endfor kh
} // endfor c
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// conv_forward
// --------------------------------------------------------------------------
// convolution: bottom -> top
// G: number of groups
// bottom: (G * C) x H x W
// top: (G * C') x H' x W'
// weight: G x C' x C x kernel_h x kernel_w
// bias: (G * C') x 1
// temp: (G * C * kernel_h * kernel_w) x (H' * W') array
// const: 1 x (H' * W') array, const[i] = 1 for all i
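// the forward pass below proceeds in three steps per batch item:
//   (1) lower bottom into temp via convert_bottom (im2col),
//   (2) one GEMM per group: top[g] = dot(weight[g], bottom[g]),
//   (3) if enabled, add the bias via a rank-1 GEMM against the all-ones const array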
void conv_forward(const Tensor* const bottom3d,
Tensor* const top3d,
const Tensor* const weight5d,
const Tensor* const bias1d,
real* const temp_data,
const real* const const_data,
const LayerOption* const option)
{
// weight shape: G x C' x C x kernel_h x kernel_w
const int num_groups = weight5d->shape[0][0]; // G
const int top_C = weight5d->shape[0][1]; // C'
const int bottom_C = weight5d->shape[0][2]; // C
const int kernel_h = weight5d->shape[0][3];
const int kernel_w = weight5d->shape[0][4];
// padding size & stride size
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
real* p_temp_data = temp_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// convert bottom shape
// (G * C) x H x W -> (G * C * kernel_h * kernel_w) x (H' * W')
{
#ifdef GPU
// one thread computes "kernel_h * kernel_w" entries in top
const int num_threads = num_groups * bottom_C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
convert_bottom_gpu<<<num_blocks, threads_per_block>>>(
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#else
convert_bottom_cpu(
p_bottom_item, p_temp_data,
num_groups * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#endif
}
// compute top[g] = dot(weight[g], bottom[g])
// weight[g]: C' x (C * kernel_h * kernel_w)
// bottom[g]: (C * kernel_h * kernel_w) x (H' * W')
// top[g]: C' x H' x W'
for (int g = 0; g < num_groups; ++g) {
const int kernel_size = bottom_C * kernel_h * kernel_w;
const int top_area = top_H * top_W;
const real* const p_temp_g = p_temp_data +
g * kernel_size * top_area;
const real* const p_weight_g = weight5d->data +
g * top_C * kernel_size;
real* const p_top_g = p_top_item + g * top_C * top_area;
// compute Z = alpha * dot(X, Y) + beta * Z
// X (= weight): m x p, Y (= bottom): p x n, Z (= top): m x n
// X, Y, Z: row-major order (e.g., Z[i][j] = Z[i * n + j])
#ifdef GPU
// input arguments:
// cublas handle,
// do_transpose_Y (= false), do_transpose_X (= false),
// n (= H' * W'), m (= C'), p (= C * kernel_h * kernel_w),
// &alpha (= 1),
// &Y, number of columns in Y (= n),
// &X, number of columns in X (= p),
// &beta (= 0),
// &Z, number of columns in Z (= n)
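      // note: cuBLAS assumes column-major storage, so the row-major product
      // Z = X * Y is obtained by passing the operands in swapped order (Y, then X);
      // this computes Z^T = Y^T * X^T in column-major terms, i.e. row-major Z,
      // without any explicit transpose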
const real one = 1.0f, zero = 0.0f;
cublasSgemm(*((cublasHandle_t*)option->handle),
CUBLAS_OP_N, CUBLAS_OP_N,
top_area, top_C, kernel_size,
&one,
p_temp_g, top_area,
p_weight_g, kernel_size,
&zero,
p_top_g, top_area);
#else
// input arguments:
// is_row_major_order (= true),
// do_transpose_X (= false), do_transpose_Y (= false),
// m (= C'), n (= H' * W'), p (= C * kernel_h * kernel_w),
// alpha (= 1),
// &X, number of columns in X (= p),
// &Y, number of columns in Y (= n),
// beta (= 0),
// &Z, number of columns in Z (= n)
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_C, top_area, kernel_size,
1.0f,
p_weight_g, kernel_size,
p_temp_g, top_area,
0.0f,
p_top_g, top_area);
#endif
}
// compute top[i][j] = top[i][j] + bias[i]
// top: (G * C') x (H' * W')
// bias: (G * C') x 1
if (option->bias) {
const int top_channels = num_groups * top_C;
const int top_area = top_H * top_W;
// the computation is equivalent to...
// top = top + dot(bias, constant)
// constant: 1 x (H' * W'), constant[i] = 1 for all i
#ifdef GPU
// thus, input arguments:
// do_transpose_Y (= false), do_transpose_X (= false),
// n = H' * W', m = G * C', p = 1
// alpha = 1, beta = 1
const real one = 1.0f;
cublasSgemm(*((cublasHandle_t*)option->handle),
CUBLAS_OP_N, CUBLAS_OP_N,
top_area, top_channels, 1,
&one,
const_data, top_area,
bias1d->data, 1,
&one,
p_top_item, top_area);
#else
// input arguments:
// do_transpose_X (= false), do_transpose_Y (= false),
// m = G * C', n = H' * W', p = 1
// alpha = 1, beta = 1
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_channels, top_area, 1,
1.0f,
bias1d->data, 1,
const_data, top_area,
1.0f,
p_top_item, top_area);
#endif
}
// locate next item
{
const int bottom_size = num_groups * bottom_C * bottom_H * bottom_W;
const int top_size = num_groups * top_C * top_H * top_W;
//const int temp_size =
// num_groups * bottom_C * kernel_h * kernel_w * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
//p_temp_data += temp_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void conv_shape(const Tensor* const bottom3d,
Tensor* const top3d,
Tensor* const weight5d,
Tensor* const bias1d,
int* const temp_size,
int* const const_size,
const LayerOption* const option)
{
const int num_groups = option->num_groups; // G
const int top_C = option->out_channels / option->num_groups; // C'
const int bottom_C = bottom3d->shape[0][0] / option->num_groups; // C
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0, total_top_area = 0, max_top_area = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
const int top_area = top_H * top_W;
top3d->shape[n][0] = num_groups * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += num_groups * top_C * top_H * top_W;
// sum(H' * W') & max(H' * W') in the batch
total_top_area += top_area;
max_top_area = MAX(max_top_area, top_area);
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// weight shape: G x C' x C x kernel_h x kernel_w
weight5d->num_items = 1;
weight5d->ndim = 5;
weight5d->shape[0][0] = num_groups;
weight5d->shape[0][1] = top_C;
weight5d->shape[0][2] = bottom_C;
weight5d->shape[0][3] = kernel_h;
weight5d->shape[0][4] = kernel_w;
weight5d->start[0] = 0;
// bias shape: (G * C') x 1
if (option->bias) {
bias1d->num_items = 1;
bias1d->ndim = 1;
bias1d->shape[0][0] = num_groups * top_C;
bias1d->start[0] = 0;
}
else if (bias1d) {
bias1d->num_items = 0;
bias1d->ndim = 0;
bias1d->shape[0][0] = 0;
bias1d->start[0] = 0;
}
// temporary data size: G * C * kernel_h * kernel_w * sum(H' * W')
*temp_size = num_groups * bottom_C * kernel_h * kernel_w * max_top_area;
// constant data size: max(H' * W')
*const_size = max_top_area;
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_forward(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
net->temp_data, net->const_data, &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
int temp_size, const_size;
Tensor* p_bias = (layer->option.bias) ? &layer->params[1] : NULL;
conv_shape(layer->p_bottoms[0], &layer->tops[0],
&layer->params[0], p_bias,
&temp_size, &const_size, &layer->option);
update_net_size(net, layer, temp_size, 0, const_size);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y, W, b;
real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
real *W_data = NULL, *b_data = NULL;
real *p_temp_data = NULL, *const_data = NULL, *p_const_data = NULL;
LayerOption option;
int temp_size, const_size;
// set option
{
option.num_groups = 1;
option.out_channels = 512;
option.kernel_h = 1;
option.kernel_w = 1;
option.pad_h = 0;
option.pad_w = 0;
option.stride_h = 1;
option.stride_w = 1;
option.bias = 1;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/conv_bottom0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
conv_shape(&X, &Y, &W, &b, &temp_size, &const_size, &option);
Y_true_data = load_data("../data/temp/conv_top0.bin",
&ndim, shape, NULL);
Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
W_data = load_data("../data/temp/conv_param0.bin",
&ndim, shape, NULL);
if (option.bias) {
b_data = load_data("../data/temp/conv_param1.bin",
&ndim, shape, NULL);
const_data = (real*)malloc(const_size * sizeof(real));
for (int i = 0; i < const_size; ++i) {
const_data[i] = 1;
}
}
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
cudaSetDevice(0);
option.handle = (cublasHandle_t*)malloc(sizeof(cublasHandle_t));
if (cublasCreate((cublasHandle_t*)option.handle)
!= CUBLAS_STATUS_SUCCESS) {
printf("cublas creation failed\n");
}
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const int X_size = flatten_size(&X);
const int Y_size = flatten_size(&Y);
const int W_size = flatten_size(&W);
const int b_size = flatten_size(&b);
printf("gpu malloc\n");
cudaMalloc(&X.data, X_size * sizeof(real));
cudaMalloc(&Y.data, Y_size * sizeof(real));
cudaMalloc(&W.data, W_size * sizeof(real));
cudaMalloc(&p_temp_data, temp_size * sizeof(real));
if (option.bias) {
cudaMalloc(&b.data, b_size * sizeof(real));
cudaMalloc(&p_const_data, const_size * sizeof(real));
}
else {
b.data = NULL;
}
printf("memcpy: cpu -> gpu\n");
cudaMemcpyAsync(X.data, X_data, X_size * sizeof(real),
cudaMemcpyHostToDevice);
cudaMemcpyAsync(W.data, W_data, W_size * sizeof(real),
cudaMemcpyHostToDevice);
if (option.bias) {
cudaMemcpyAsync(b.data, b_data, b_size * sizeof(real),
cudaMemcpyHostToDevice);
cudaMemcpyAsync(p_const_data, const_data, const_size * sizeof(real),
cudaMemcpyHostToDevice);
}
}
#else
{
X.data = X_data;
Y.data = Y_data;
W.data = W_data;
p_temp_data = (real*)malloc(temp_size * sizeof(real));
if (option.bias) {
b.data = b_data;
p_const_data = const_data;
}
else {
b.data = NULL;
}
}
#endif
// do forward operation
{
printf("do forward\n");
conv_forward(&X, &Y, &W, &b, p_temp_data, p_const_data, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const int Y_size = flatten_size(&Y);
printf("memcpy: cpu <- gpu\n");
cudaMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
cudaMemcpyDeviceToHost);
}
#endif
// verify results
{
int i = 0;
printf("verification\n");
for (int n = 0; n < Y.num_items; ++n) {
for (int c = 0; c < Y.shape[n][0]; ++c) {
for (int h = 0; h < Y.shape[n][1]; ++h) {
for (int w = 0; w < Y.shape[n][2]; ++w) {
real diff = ABS(Y_data[i] - Y_true_data[i]);
diff /= 1e-10f + MIN(ABS(Y_data[i]), ABS(Y_true_data[i]));
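            // relative error; the 1e-10f guard avoids division by zero when
            // both values are (near-)zero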
#ifdef GPU
if (diff > 0) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#else
if (diff > 1e-3f) {
printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
}
#endif
++i;
} // endfor w
} // endfor h
} // endfor c
} // endfor n
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(Y_data);
free(Y_true_data);
free(W_data);
if (option.bias) {
free(b_data);
free(const_data);
}
}
#ifdef GPU
{
printf("gpu free\n");
cudaFree(X.data);
cudaFree(Y.data);
cudaFree(W.data);
cudaFree(p_temp_data);
if (option.bias) {
cudaFree(b.data);
cudaFree(p_const_data);
}
if (cublasDestroy(*((cublasHandle_t*)option.handle))
!= CUBLAS_STATUS_SUCCESS) {
printf("cublas destruction failed\n");
}
free(option.handle);
}
#else
{
free(p_temp_data);
}
#endif
return 0;
}
#endif // endifdef TEST
|
6db4a1120d0e6af76721b1b0ce8b383196982ea8.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef USE_LEGACY_DSLASH
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_ndeg_twisted_mass_preconditioned.cuh>
/**
   This is the preconditioned twisted-mass operator acting on a non-degenerate
   quark doublet.
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct NdegTwistedMassPreconditionedLaunch {
static constexpr const char *kernel = "quda::ndegTwistedMassPreconditionedGPU"; // kernel name for jit compilation
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const hipStream_t &stream)
{
static_assert(nParity == 1, "Non-degenerate twisted-mass operator only defined for nParity=1");
dslash.launch(ndegTwistedMassPreconditionedGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp,
arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg>
class NdegTwistedMassPreconditioned : public Dslash<Float>
{
protected:
Arg &arg;
    const ColorSpinorField &in;
bool shared;
unsigned int sharedBytesPerThread() const
{
return shared ? 2 * nColor * 4 * sizeof(typename mapper<Float>::type) : 0;
}
public:
NdegTwistedMassPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/dslash_ndeg_twisted_mass_preconditioned.cuh"),
arg(arg),
in(in),
shared(arg.asymmetric || !arg.dagger)
{
TunableVectorYZ::resizeVector(2, arg.nParity);
if (arg.asymmetric)
for (int i = 0; i < 8; i++)
if (i != 4) { strcat(Dslash<Float>::aux[i], ",asym"); }
}
virtual ~NdegTwistedMassPreconditioned() {}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
if (arg.asymmetric && !arg.dagger) errorQuda("asymmetric operator only defined for dagger");
if (arg.asymmetric && arg.xpay) errorQuda("asymmetric operator not defined for xpay");
if (arg.nParity == 1) {
if (arg.xpay)
Dslash<Float>::template instantiate<NdegTwistedMassPreconditionedLaunch, nDim, nColor, 1, true>(
tp, arg, stream);
else
Dslash<Float>::template instantiate<NdegTwistedMassPreconditionedLaunch, nDim, nColor, 1, false>(
tp, arg, stream);
} else {
errorQuda("Preconditioned non-degenerate twisted-mass operator not defined nParity=%d", arg.nParity);
}
}
    void initTuneParam(TuneParam &param) const
{
TunableVectorYZ::initTuneParam(param);
if (shared) {
param.block.y = 2; // flavor must be contained in the block
param.grid.y = 1;
param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z;
}
}
    void defaultTuneParam(TuneParam &param) const
{
TunableVectorYZ::defaultTuneParam(param);
if (shared) {
param.block.y = 2; // flavor must be contained in the block
param.grid.y = 1;
param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z;
}
}
long long flops() const
{
long long flops = Dslash<Float>::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL: break; // twisted-mass flops are in the interior kernel
case INTERIOR_KERNEL:
case KERNEL_POLICY:
flops += 2 * nColor * 4 * 4 * in.Volume(); // complex * Nc * Ns * fma * vol
break;
}
return flops;
}
TuneKey tuneKey() const
{
return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]);
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct NdegTwistedMassPreconditionedApply {
inline NdegTwistedMassPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double a, double b, double c, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric,
const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4;
NdegTwistedMassArg<Float, nColor, recon> arg(
out, in, U, a, b, c, xpay, x, parity, dagger, asymmetric, comm_override);
NdegTwistedMassPreconditioned<Float, nDim, nColor, NdegTwistedMassArg<Float, nColor, recon>> twisted(arg, out, in);
dslash::DslashPolicyTune<decltype(twisted)> policy(twisted,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)),
in.getDslashConstant().volume_4d_cb, in.getDslashConstant().ghostFaceCB, profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the non-degenerate twisted-mass Dslash operator
// out(x) = M*in = a*(1 + i*b*gamma_5*tau_3 + c*tau_1)*D + x
// Uses the kappa normalization for the Wilson operator, with a = -kappa.
void ApplyNdegTwistedMassPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double a, double b, double c, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, x, U);
// check all locations match
checkLocation(out, in, x, U);
// with symmetric dagger operator we must use kernel packing
if (dagger && !asymmetric) pushKernelPackT(true);
instantiate<NdegTwistedMassPreconditionedApply>(
out, in, U, a, b, c, xpay, x, parity, dagger, asymmetric, comm_override, profile);
if (dagger && !asymmetric) popKernelPackT();
#else
errorQuda("Non-degenerate twisted-mass dslash has not been built");
#endif // GPU_NDEG_TWISTED_MASS_DIRAC
}
} // namespace quda
#endif
| 6db4a1120d0e6af76721b1b0ce8b383196982ea8.cu | #ifndef USE_LEGACY_DSLASH
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_ndeg_twisted_mass_preconditioned.cuh>
/**
   This is the preconditioned twisted-mass operator acting on a non-degenerate
   quark doublet.
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct NdegTwistedMassPreconditionedLaunch {
static constexpr const char *kernel = "quda::ndegTwistedMassPreconditionedGPU"; // kernel name for jit compilation
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const cudaStream_t &stream)
{
static_assert(nParity == 1, "Non-degenerate twisted-mass operator only defined for nParity=1");
dslash.launch(ndegTwistedMassPreconditionedGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp,
arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg>
class NdegTwistedMassPreconditioned : public Dslash<Float>
{
protected:
Arg &arg;
    const ColorSpinorField &in;
bool shared;
unsigned int sharedBytesPerThread() const
{
return shared ? 2 * nColor * 4 * sizeof(typename mapper<Float>::type) : 0;
}
public:
NdegTwistedMassPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/dslash_ndeg_twisted_mass_preconditioned.cuh"),
arg(arg),
in(in),
shared(arg.asymmetric || !arg.dagger)
{
TunableVectorYZ::resizeVector(2, arg.nParity);
if (arg.asymmetric)
for (int i = 0; i < 8; i++)
if (i != 4) { strcat(Dslash<Float>::aux[i], ",asym"); }
}
virtual ~NdegTwistedMassPreconditioned() {}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
if (arg.asymmetric && !arg.dagger) errorQuda("asymmetric operator only defined for dagger");
if (arg.asymmetric && arg.xpay) errorQuda("asymmetric operator not defined for xpay");
if (arg.nParity == 1) {
if (arg.xpay)
Dslash<Float>::template instantiate<NdegTwistedMassPreconditionedLaunch, nDim, nColor, 1, true>(
tp, arg, stream);
else
Dslash<Float>::template instantiate<NdegTwistedMassPreconditionedLaunch, nDim, nColor, 1, false>(
tp, arg, stream);
} else {
errorQuda("Preconditioned non-degenerate twisted-mass operator not defined nParity=%d", arg.nParity);
}
}
    void initTuneParam(TuneParam &param) const
{
TunableVectorYZ::initTuneParam(param);
if (shared) {
param.block.y = 2; // flavor must be contained in the block
param.grid.y = 1;
param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z;
}
}
    void defaultTuneParam(TuneParam &param) const
{
TunableVectorYZ::defaultTuneParam(param);
if (shared) {
param.block.y = 2; // flavor must be contained in the block
param.grid.y = 1;
param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z;
}
}
long long flops() const
{
long long flops = Dslash<Float>::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL: break; // twisted-mass flops are in the interior kernel
case INTERIOR_KERNEL:
case KERNEL_POLICY:
flops += 2 * nColor * 4 * 4 * in.Volume(); // complex * Nc * Ns * fma * vol
break;
}
return flops;
}
TuneKey tuneKey() const
{
return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]);
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct NdegTwistedMassPreconditionedApply {
inline NdegTwistedMassPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double a, double b, double c, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric,
const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4;
NdegTwistedMassArg<Float, nColor, recon> arg(
out, in, U, a, b, c, xpay, x, parity, dagger, asymmetric, comm_override);
NdegTwistedMassPreconditioned<Float, nDim, nColor, NdegTwistedMassArg<Float, nColor, recon>> twisted(arg, out, in);
dslash::DslashPolicyTune<decltype(twisted)> policy(twisted,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)),
in.getDslashConstant().volume_4d_cb, in.getDslashConstant().ghostFaceCB, profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the non-degenerate twisted-mass Dslash operator
// out(x) = M*in = a*(1 + i*b*gamma_5*tau_3 + c*tau_1)*D + x
// Uses the kappa normalization for the Wilson operator, with a = -kappa.
void ApplyNdegTwistedMassPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double a, double b, double c, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, x, U);
// check all locations match
checkLocation(out, in, x, U);
// with symmetric dagger operator we must use kernel packing
if (dagger && !asymmetric) pushKernelPackT(true);
instantiate<NdegTwistedMassPreconditionedApply>(
out, in, U, a, b, c, xpay, x, parity, dagger, asymmetric, comm_override, profile);
if (dagger && !asymmetric) popKernelPackT();
#else
errorQuda("Non-degenerate twisted-mass dslash has not been built");
#endif // GPU_NDEG_TWISTED_MASS_DIRAC
}
} // namespace quda
#endif
|
c54b23fec22d5bd0c36d6e835586d4ccba064acd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "matrix/math.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Matrix {
template <typename Type>
__global__ void nativePowerKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in[idx] * in[idx];
}
}
template <typename Type>
void naivePower(Type *in, Type *out, int len, hipStream_t stream) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
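  // ceildiv rounds up so that all `len` elements are covered; the bounds check
  // inside the kernel (idx < len) discards the surplus threads of the last block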
hipLaunchKernelGGL(( nativePowerKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, in, out, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename Type>
__global__ void nativeSqrtKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = sqrt(in[idx]);
}
}
template <typename Type>
void naiveSqrt(Type *in, Type *out, int len) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
hipLaunchKernelGGL(( nativeSqrtKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, in, out, len);
CUDA_CHECK(hipPeekAtLastError());
}
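// reference sign flip: each column is negated if its largest-magnitude entry is
// negative, so that entry ends up positive (commonly done to fix the sign
// ambiguity of eigenvector / singular-vector columns)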
template <typename Type>
__global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount,
int colCount) {
int d_i = blockIdx.x * rowCount;
int end = d_i + rowCount;
if (blockIdx.x < colCount) {
Type max = 0.0;
int max_index = 0;
for (int i = d_i; i < end; i++) {
Type val = in[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
for (int i = d_i; i < end; i++) {
if (in[max_index] < 0.0) {
out[i] = -in[i];
} else {
out[i] = in[i];
}
}
}
__syncthreads();
}
template <typename Type>
void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) {
hipLaunchKernelGGL(( naiveSignFlipKernel<Type>), dim3(colCount), dim3(1), 0, 0, in, out, rowCount, colCount);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct MathInputs {
T tolerance;
int n_row;
int n_col;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MathInputs<T> &dims) {
return os;
}
template <typename T>
class MathTest : public ::testing::TestWithParam<MathInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MathInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
allocate(in_power, len);
allocate(out_power_ref, len);
allocate(in_sqrt, len);
allocate(out_sqrt_ref, len);
allocate(in_sign_flip, len);
allocate(out_sign_flip_ref, len);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in_ratio, 4);
T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0};
updateDevice(in_ratio, in_ratio_h, 4, stream);
allocate(out_ratio_ref, 4);
T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375};
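    // the reference values are each input divided by the total sum
    // (1 + 2 + 2 + 3 = 8), i.e. {1/8, 2/8, 2/8, 3/8}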
updateDevice(out_ratio_ref, out_ratio_ref_h, 4, stream);
r.uniform(in_power, len, T(-1.0), T(1.0), stream);
r.uniform(in_sqrt, len, T(0.0), T(1.0), stream);
// r.uniform(in_ratio, len, T(0.0), T(1.0));
r.uniform(in_sign_flip, len, T(-100.0), T(100.0), stream);
naivePower(in_power, out_power_ref, len, stream);
power(in_power, len, stream);
naiveSqrt(in_sqrt, out_sqrt_ref, len);
seqRoot(in_sqrt, len, stream);
auto mgr = makeDefaultAllocator();
ratio(in_ratio, in_ratio, 4, mgr, stream);
naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col);
signFlip(in_sign_flip, params.n_row, params.n_col, stream);
allocate(in_recip, 4);
allocate(in_recip_ref, 4);
allocate(out_recip, 4);
// default threshold is 1e-15
std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16};
std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0};
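    // with scalar = 1.0 the expected result is 1/x; the last input (1e-17) is
    // below the 1e-15 threshold noted above, so the setzero variant is expected
    // to produce 0 rather than 1e17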
updateDevice(in_recip, in_recip_h.data(), 4, stream);
updateDevice(in_recip_ref, in_recip_ref_h.data(), 4, stream);
T recip_scalar = T(1.0);
    // this `reciprocal()` call has to go first because the next one modifies its input in place
reciprocal(in_recip, out_recip, recip_scalar, 4, stream);
reciprocal(in_recip, recip_scalar, 4, stream, true);
std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1};
std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1};
allocate(in_smallzero, 4);
allocate(out_smallzero, 4);
allocate(out_smallzero_ref, 4);
updateDevice(in_smallzero, in_small_val_zero_h.data(), 4, stream);
updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4, stream);
setSmallValuesZero(out_smallzero, in_smallzero, 4, stream);
setSmallValuesZero(in_smallzero, 4, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in_power));
CUDA_CHECK(hipFree(out_power_ref));
CUDA_CHECK(hipFree(in_sqrt));
CUDA_CHECK(hipFree(out_sqrt_ref));
CUDA_CHECK(hipFree(in_ratio));
CUDA_CHECK(hipFree(out_ratio_ref));
CUDA_CHECK(hipFree(in_sign_flip));
CUDA_CHECK(hipFree(out_sign_flip_ref));
CUDA_CHECK(hipFree(in_recip));
CUDA_CHECK(hipFree(in_recip_ref));
CUDA_CHECK(hipFree(out_recip));
CUDA_CHECK(hipFree(in_smallzero));
CUDA_CHECK(hipFree(out_smallzero));
CUDA_CHECK(hipFree(out_smallzero_ref));
}
protected:
MathInputs<T> params;
T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio,
*out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip,
*in_recip_ref, *out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref;
};
const std::vector<MathInputs<float>> inputsf = {
{0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}};
const std::vector<MathInputs<double>> inputsd = {
{0.00001, 1024, 1024, 1024 * 1024, 1234ULL}};
typedef MathTest<float> MathPowerTestF;
TEST_P(MathPowerTestF, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathPowerTestD;
TEST_P(MathPowerTestD, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSqrtTestF;
TEST_P(MathSqrtTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSqrtTestD;
TEST_P(MathSqrtTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathRatioTestF;
TEST_P(MathRatioTestF, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathRatioTestD;
TEST_P(MathRatioTestD, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSignFlipTestF;
TEST_P(MathSignFlipTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSignFlipTestD;
TEST_P(MathSignFlipTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathReciprocalTestF;
TEST_P(MathReciprocalTestF, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<float>(params.tolerance)));
  // the 4th term depends on `setzero=true`, which this overload of `reciprocal`
  // does not apply, so only the first 3 terms are compared
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathReciprocalTestD;
TEST_P(MathReciprocalTestD, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<double>(params.tolerance)));
  // the 4th term depends on `setzero=true`, which this overload of `reciprocal`
  // does not apply, so only the first 3 terms are compared
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSetSmallZeroTestF;
TEST_P(MathSetSmallZeroTestF, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSetSmallZeroTestD;
TEST_P(MathSetSmallZeroTestD, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD,
::testing::ValuesIn(inputsd));
} // end namespace Matrix
} // end namespace MLCommon
| c54b23fec22d5bd0c36d6e835586d4ccba064acd.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "matrix/math.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Matrix {
template <typename Type>
__global__ void nativePowerKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in[idx] * in[idx];
}
}
template <typename Type>
void naivePower(Type *in, Type *out, int len, cudaStream_t stream) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
nativePowerKernel<Type><<<nblks, TPB, 0, stream>>>(in, out, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename Type>
__global__ void nativeSqrtKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = sqrt(in[idx]);
}
}
template <typename Type>
void naiveSqrt(Type *in, Type *out, int len) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
nativeSqrtKernel<Type><<<nblks, TPB>>>(in, out, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename Type>
__global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount,
int colCount) {
int d_i = blockIdx.x * rowCount;
int end = d_i + rowCount;
if (blockIdx.x < colCount) {
Type max = 0.0;
int max_index = 0;
for (int i = d_i; i < end; i++) {
Type val = in[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
for (int i = d_i; i < end; i++) {
if (in[max_index] < 0.0) {
out[i] = -in[i];
} else {
out[i] = in[i];
}
}
}
__syncthreads();
}
template <typename Type>
void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) {
naiveSignFlipKernel<Type><<<colCount, 1>>>(in, out, rowCount, colCount);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct MathInputs {
T tolerance;
int n_row;
int n_col;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MathInputs<T> &dims) {
return os;
}
template <typename T>
class MathTest : public ::testing::TestWithParam<MathInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MathInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
allocate(in_power, len);
allocate(out_power_ref, len);
allocate(in_sqrt, len);
allocate(out_sqrt_ref, len);
allocate(in_sign_flip, len);
allocate(out_sign_flip_ref, len);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in_ratio, 4);
T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0};
updateDevice(in_ratio, in_ratio_h, 4, stream);
allocate(out_ratio_ref, 4);
T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375};
updateDevice(out_ratio_ref, out_ratio_ref_h, 4, stream);
r.uniform(in_power, len, T(-1.0), T(1.0), stream);
r.uniform(in_sqrt, len, T(0.0), T(1.0), stream);
// r.uniform(in_ratio, len, T(0.0), T(1.0));
r.uniform(in_sign_flip, len, T(-100.0), T(100.0), stream);
naivePower(in_power, out_power_ref, len, stream);
power(in_power, len, stream);
naiveSqrt(in_sqrt, out_sqrt_ref, len);
seqRoot(in_sqrt, len, stream);
auto mgr = makeDefaultAllocator();
ratio(in_ratio, in_ratio, 4, mgr, stream);
naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col);
signFlip(in_sign_flip, params.n_row, params.n_col, stream);
allocate(in_recip, 4);
allocate(in_recip_ref, 4);
allocate(out_recip, 4);
// default threshold is 1e-15
std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16};
std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0};
updateDevice(in_recip, in_recip_h.data(), 4, stream);
updateDevice(in_recip_ref, in_recip_ref_h.data(), 4, stream);
T recip_scalar = T(1.0);
    // this `reciprocal()` call has to go first because the next one modifies its input in place
reciprocal(in_recip, out_recip, recip_scalar, 4, stream);
reciprocal(in_recip, recip_scalar, 4, stream, true);
std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1};
std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1};
allocate(in_smallzero, 4);
allocate(out_smallzero, 4);
allocate(out_smallzero_ref, 4);
updateDevice(in_smallzero, in_small_val_zero_h.data(), 4, stream);
updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4, stream);
setSmallValuesZero(out_smallzero, in_smallzero, 4, stream);
setSmallValuesZero(in_smallzero, 4, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in_power));
CUDA_CHECK(cudaFree(out_power_ref));
CUDA_CHECK(cudaFree(in_sqrt));
CUDA_CHECK(cudaFree(out_sqrt_ref));
CUDA_CHECK(cudaFree(in_ratio));
CUDA_CHECK(cudaFree(out_ratio_ref));
CUDA_CHECK(cudaFree(in_sign_flip));
CUDA_CHECK(cudaFree(out_sign_flip_ref));
CUDA_CHECK(cudaFree(in_recip));
CUDA_CHECK(cudaFree(in_recip_ref));
CUDA_CHECK(cudaFree(out_recip));
CUDA_CHECK(cudaFree(in_smallzero));
CUDA_CHECK(cudaFree(out_smallzero));
CUDA_CHECK(cudaFree(out_smallzero_ref));
}
protected:
MathInputs<T> params;
T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio,
*out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip,
*in_recip_ref, *out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref;
};
const std::vector<MathInputs<float>> inputsf = {
{0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}};
const std::vector<MathInputs<double>> inputsd = {
{0.00001, 1024, 1024, 1024 * 1024, 1234ULL}};
typedef MathTest<float> MathPowerTestF;
TEST_P(MathPowerTestF, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathPowerTestD;
TEST_P(MathPowerTestD, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSqrtTestF;
TEST_P(MathSqrtTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSqrtTestD;
TEST_P(MathSqrtTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathRatioTestF;
TEST_P(MathRatioTestF, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathRatioTestD;
TEST_P(MathRatioTestD, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSignFlipTestF;
TEST_P(MathSignFlipTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSignFlipTestD;
TEST_P(MathSignFlipTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathReciprocalTestF;
TEST_P(MathReciprocalTestF, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<float>(params.tolerance)));
  // the 4th term depends on `setzero=true`, which this overload of `reciprocal`
  // does not apply, so only the first 3 terms are compared
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathReciprocalTestD;
TEST_P(MathReciprocalTestD, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<double>(params.tolerance)));
  // the 4th term depends on `setzero=true`, which this overload of `reciprocal`
  // does not apply, so only the first 3 terms are compared
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSetSmallZeroTestF;
TEST_P(MathSetSmallZeroTestF, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSetSmallZeroTestD;
TEST_P(MathSetSmallZeroTestD, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD,
::testing::ValuesIn(inputsd));
} // end namespace Matrix
} // end namespace MLCommon
|
8248756adadd9a230c75854e8be714616c813bca.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while the GPU is executing (including DMA memcopies
// between the host and device). The CPU can query CUDA events to determine
// whether the GPU has completed its tasks.
//
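//
// A condensed sketch of the event-timing pattern described above, applied
// to the hello_cuda kernel defined in this file (kept commented out, like
// the full asyncAPI sample further below):
/*
static float time_hello_cuda()
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);                        // mark start in stream 0
    hipLaunchKernelGGL(hello_cuda, dim3(1), dim3(1), 0, 0);
    hipEventRecord(stop, 0);                         // mark end in stream 0

    // The host could do unrelated work here and poll hipEventQuery(stop);
    // for a simple timing we just block until the stop event has fired.
    hipEventSynchronize(stop);

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);           // elapsed GPU time in ms

    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms;
}
*/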
// includes, system
#include <stdio.h>
// includes CUDA Runtime
//#include <hip/hip_runtime.h>
// includes, project
//#include <helper_cuda.h>
//#include <helper_functions.h> // helper utility functions
__global__ void hello_cuda()
{
printf("Hello CUDA!\n");
//printf("zhou jinxing 333\n");
}
int main()
{
hipLaunchKernelGGL(( hello_cuda), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
/*
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
bool correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return false;
}
return true;
}
int main(int argc, char *argv[])
{
int devID;
hipDeviceProp_t deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
int n = 16 * 1024 * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory
int *a = 0;
checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
checkCudaErrors(hipMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(hipDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipMemcpyAsync(d_a, a, nbytes, hipMemcpyHostToDevice, 0);
increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
hipMemcpyAsync(a, d_a, nbytes, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = correct_output(a, n, value);
// release resources
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipHostFree(a));
checkCudaErrors(hipFree(d_a));
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
*/
| 8248756adadd9a230c75854e8be714616c813bca.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while the GPU is executing (including DMA memcopies
// between the host and device). The CPU can query CUDA events to determine
// whether the GPU has completed its tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
//#include <cuda_runtime.h>
// includes, project
//#include <helper_cuda.h>
//#include <helper_functions.h> // helper utility functions
__global__ void hello_cuda()
{
printf("Hello CUDA!\n");
//printf("zhou jinxing 333\n");
}
int main()
{
hello_cuda<<<1,1>>>();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
/*
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
bool correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return false;
}
return true;
}
int main(int argc, char *argv[])
{
int devID;
cudaDeviceProp deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
int n = 16 * 1024 * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory
int *a = 0;
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
checkCudaErrors(cudaMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = correct_output(a, n, value);
// release resources
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFreeHost(a));
checkCudaErrors(cudaFree(d_a));
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
*/
|
5b8d135898a665930c95e3d9a96b47e1fa885529.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/optimizers/adam_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename MT>
__global__ void AdamKernelREG(MT beta1, MT beta2, MT epsilon, MT beta1_pow_,
MT beta2_pow_, const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = beta1_pow_;
MT beta2_pow = beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = moment1[id];
MT mom2 = moment2[id];
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T, typename MT>
__global__ void AdamKernelMEM(MT beta1, MT beta2, MT epsilon,
const MT* beta1_pow_, const MT* beta2_pow_,
const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = *beta1_pow_;
MT beta2_pow = *beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T>
__global__ void UpdateBetaPow(T beta1, T beta2, const T* beta1_pow_,
const T* beta2_pow_, T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(
MT beta1, MT beta2, MT epsilon, const MT beta1_pow, const MT beta2_pow,
const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_,
const MT* lr_, const T* grad_, const T* param_, T* param_out_,
const MT* master_param, MT* master_param_out, const int64_t* rows_,
int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
math::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 / (sqrt(mom2) +
epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T>
class AdamOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type())));
using paddle::framework::LoDTensor;
using MPDType = typename details::MPTypeTrait<T>::Type;
int64_t min_row_size_to_use_multithread =
ctx.Attr<int64_t>("min_row_size_to_use_multithread");
bool lazy_mode = ctx.Attr<bool>("lazy_mode");
MPDType epsilon = static_cast<MPDType>(ctx.Attr<float>("epsilon"));
auto* param = ctx.Input<LoDTensor>("Param");
auto* grad_var = ctx.InputVar("Grad");
auto* mom1 = ctx.Input<LoDTensor>("Moment1");
auto* mom2 = ctx.Input<LoDTensor>("Moment2");
auto* lr = ctx.Input<LoDTensor>("LearningRate");
auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");
auto* param_out = ctx.Output<LoDTensor>("ParamOut");
auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");
MPDType beta1 = static_cast<MPDType>(ctx.Attr<float>("beta1"));
if (ctx.HasInput("Beta1Tensor")) {
auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta1Tensor) size must be 1, but get %d",
beta1_tensor->numel()));
beta1 = static_cast<MPDType>(GetAttrFromTensor(beta1_tensor));
}
MPDType beta2 = static_cast<MPDType>(ctx.Attr<float>("beta2"));
if (ctx.HasInput("Beta2Tensor")) {
auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta2Tensor) size must be 1, but get %d",
beta2_tensor->numel()));
beta2 = static_cast<MPDType>(GetAttrFromTensor(beta2_tensor));
}
VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
<< "beta2_pow.numel() : " << beta2_pow->numel();
VLOG(3) << "param.numel(): " << param->numel();
PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const bool multi_precision = ctx.Attr<bool>("multi_precision");
const LoDTensor* master_param = nullptr;
LoDTensor* master_param_out = nullptr;
if (multi_precision) {
bool has_master =
ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
PADDLE_ENFORCE_EQ(has_master, true,
platform::errors::InvalidArgument(
"The Input(MasterParam) and Output(MasterParamOut) "
"should not be null when "
"the attr `multi_precision` is true"));
master_param = ctx.Input<LoDTensor>("MasterParam");
master_param_out = ctx.Output<LoDTensor>("MasterParamOut");
}
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(ctx.GetPlace())
: nullptr;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
if (grad_var->IsType<framework::LoDTensor>()) {
auto* grad = ctx.Input<LoDTensor>("Grad");
// update param and moment
int threads = 512;
int blocks = (param->numel() + threads - 1) / threads;
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
// Compute with betapow in REG
hipLaunchKernelGGL(( AdamKernelREG<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
// Cpu update
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
} else {
hipLaunchKernelGGL(( AdamKernelMEM<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
// Update with gpu
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
} else if (grad_var->IsType<framework::SelectedRows>()) {
auto* grad = ctx.Input<framework::SelectedRows>("Grad");
if (grad->rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
framework::SelectedRows tmp_grad_merge;
const framework::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
merge_func(ctx.template device_context<platform::CUDADeviceContext>(),
*grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
int threads = 512;
int ndim = param->numel();
int blocks = (ndim + threads - 1) / threads;
hipLaunchKernelGGL(( SparseAdamCUDAKernelREG<
T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode, ndim);
// Update with cpu
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
} else {
SparseAdamFunctor<T, GPUAdam, MPDType> functor(
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
platform::ForRange<platform::CUDADeviceContext> for_range(
static_cast<const platform::CUDADeviceContext&>(
ctx.device_context()),
param->numel());
for_range(functor);
// update beta1 and beta2
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Variable type not supported by adam_op"));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(adam, ops::AdamOpCUDAKernel<float>,
ops::AdamOpCUDAKernel<double>,
ops::AdamOpCUDAKernel<plat::float16>);
| 5b8d135898a665930c95e3d9a96b47e1fa885529.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/optimizers/adam_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename MT>
__global__ void AdamKernelREG(MT beta1, MT beta2, MT epsilon, MT beta1_pow_,
MT beta2_pow_, const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = beta1_pow_;
MT beta2_pow = beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = moment1[id];
MT mom2 = moment2[id];
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T, typename MT>
__global__ void AdamKernelMEM(MT beta1, MT beta2, MT epsilon,
const MT* beta1_pow_, const MT* beta2_pow_,
const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = *beta1_pow_;
MT beta2_pow = *beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T>
__global__ void UpdateBetaPow(T beta1, T beta2, const T* beta1_pow_,
const T* beta2_pow_, T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(
MT beta1, MT beta2, MT epsilon, const MT beta1_pow, const MT beta2_pow,
const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_,
const MT* lr_, const T* grad_, const T* param_, T* param_out_,
const MT* master_param, MT* master_param_out, const int64_t* rows_,
int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
math::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 / (sqrt(mom2) +
epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T>
class AdamOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type())));
using paddle::framework::LoDTensor;
using MPDType = typename details::MPTypeTrait<T>::Type;
int64_t min_row_size_to_use_multithread =
ctx.Attr<int64_t>("min_row_size_to_use_multithread");
bool lazy_mode = ctx.Attr<bool>("lazy_mode");
MPDType epsilon = static_cast<MPDType>(ctx.Attr<float>("epsilon"));
auto* param = ctx.Input<LoDTensor>("Param");
auto* grad_var = ctx.InputVar("Grad");
auto* mom1 = ctx.Input<LoDTensor>("Moment1");
auto* mom2 = ctx.Input<LoDTensor>("Moment2");
auto* lr = ctx.Input<LoDTensor>("LearningRate");
auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");
auto* param_out = ctx.Output<LoDTensor>("ParamOut");
auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");
MPDType beta1 = static_cast<MPDType>(ctx.Attr<float>("beta1"));
if (ctx.HasInput("Beta1Tensor")) {
auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta1Tensor) size must be 1, but get %d",
beta1_tensor->numel()));
beta1 = static_cast<MPDType>(GetAttrFromTensor(beta1_tensor));
}
MPDType beta2 = static_cast<MPDType>(ctx.Attr<float>("beta2"));
if (ctx.HasInput("Beta2Tensor")) {
auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta2Tensor) size must be 1, but get %d",
beta2_tensor->numel()));
beta2 = static_cast<MPDType>(GetAttrFromTensor(beta2_tensor));
}
VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
<< "beta2_pow.numel() : " << beta2_pow->numel();
VLOG(3) << "param.numel(): " << param->numel();
PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const bool multi_precision = ctx.Attr<bool>("multi_precision");
const LoDTensor* master_param = nullptr;
LoDTensor* master_param_out = nullptr;
if (multi_precision) {
bool has_master =
ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
PADDLE_ENFORCE_EQ(has_master, true,
platform::errors::InvalidArgument(
"The Input(MasterParam) and Output(MasterParamOut) "
"should not be null when "
"the attr `multi_precision` is true"));
master_param = ctx.Input<LoDTensor>("MasterParam");
master_param_out = ctx.Output<LoDTensor>("MasterParamOut");
}
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(ctx.GetPlace())
: nullptr;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
if (grad_var->IsType<framework::LoDTensor>()) {
auto* grad = ctx.Input<LoDTensor>("Grad");
// update param and moment
int threads = 512;
int blocks = (param->numel() + threads - 1) / threads;
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
// Compute with betapow in REG
AdamKernelREG<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
// Cpu update
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
} else {
AdamKernelMEM<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
// Update with gpu
UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
} else if (grad_var->IsType<framework::SelectedRows>()) {
auto* grad = ctx.Input<framework::SelectedRows>("Grad");
if (grad->rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
framework::SelectedRows tmp_grad_merge;
const framework::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
merge_func(ctx.template device_context<platform::CUDADeviceContext>(),
*grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
int threads = 512;
int ndim = param->numel();
int blocks = (ndim + threads - 1) / threads;
SparseAdamCUDAKernelREG<
T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode, ndim);
// Update with cpu
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
} else {
SparseAdamFunctor<T, GPUAdam, MPDType> functor(
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
platform::ForRange<platform::CUDADeviceContext> for_range(
static_cast<const platform::CUDADeviceContext&>(
ctx.device_context()),
param->numel());
for_range(functor);
// update beta1 and beta2
UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Variable type not supported by adam_op"));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(adam, ops::AdamOpCUDAKernel<float>,
ops::AdamOpCUDAKernel<double>,
ops::AdamOpCUDAKernel<plat::float16>);
|
56913b582a003bb6906a929726360661e7d8474a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include "stepper.cuh"
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#ifndef RESTRICT
#define restrict __restrict__
#endif /* RESTRICT */
//ldoc on
/**
* ## Implementation
*
* ### Structure allocation
*/
void print_array(float* array, int len) {
for(int i = 0; i < len; i++) {
printf("%.2f ", array[i]);
}
printf("\n");
}
extern "C"
central2d_t* central2d_init(float w, float h, int nx, int ny,
int nfield, flux_t flux, speed_t speed,
float cfl)
{
// We extend to a four cell buffer to avoid BC comm on odd time steps
int ng = 4;
central2d_t* sim;
hipMallocManaged(&sim, sizeof(central2d_t));
sim->nx = nx;
sim->ny = ny;
sim->ng = ng;
sim->nfield = nfield;
sim->dx = w/nx;
sim->dy = h/ny;
sim->flux = flux;
sim->speed = speed;
sim->cfl = cfl;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int nc = nx_all * ny_all;
int N = nfield * nc;
hipMallocManaged(&sim->u, (4*N + 6*nx_all)* sizeof(float));
sim->v = sim->u + N;
sim->f = sim->u + 2*N;
sim->g = sim->u + 3*N;
sim->scratch = sim->u + 4*N;
return sim;
}
extern "C"
void central2d_free(central2d_t* sim)
{
hipFree(sim->u);
hipFree(sim);
}
extern "C"
int central2d_offset(central2d_t* sim, int k, int ix, int iy)
{
int nx = sim->nx, ny = sim->ny, ng = sim->ng;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
return (k*ny_all+(ng+iy))*nx_all+(ng+ix);
}
/**
* ### Boundary conditions
*
* In finite volume methods, boundary conditions are typically applied by
* setting appropriate values in ghost cells. For our framework, we will
* apply periodic boundary conditions; that is, waves that exit one side
* of the domain will enter from the other side.
*
* We apply the conditions by assuming that the cells with coordinates
 * `nghost <= ix < nx+nghost` and `nghost <= iy < ny+nghost` are
* "canonical", and setting the values for all other cells `(ix,iy)`
* to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
* integers `p` and `q`.
*/
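//
// A small worked example of the wrap-around mapping (illustrative only):
// with nx = 8 and ng = 4 the row stride is s = nx + 2*ng = 16 and the
// canonical columns are ix = 4..11 (physical cells 0..7).  The left ghost
// columns 0..3 are filled from canonical columns 8..11 (offsets l = nx,
// lg = 0 below), so ghost column 0 takes the value of physical cell
// (0 - ng) mod nx = 4.  The right ghost columns 12..15 are filled from
// canonical columns 4..7 (r = ng, rg = nx + ng), and rows wrap the same
// way through the b/bg and t/tg offsets.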
static inline
void copy_subgrid(float* restrict dst,
const float* restrict src,
int nx, int ny, int stride)
{
for (int iy = 0; iy < ny; ++iy)
for (int ix = 0; ix < nx; ++ix)
dst[iy*stride+ix] = src[iy*stride+ix];
}
// Change u
extern "C"
void central2d_periodic(float* restrict u,
int nx, int ny, int ng, int nfield)
{
// Stride and number per field
int s = nx + 2*ng;
int field_stride = (ny+2*ng)*s;
// Offsets of left, right, top, and bottom data blocks and ghost blocks
int l = nx, lg = 0;
int r = ng, rg = nx+ng;
int b = ny*s, bg = 0;
    int t = ng*s,  tg = (ny+ng)*s;
// Copy data into ghost cells on each side
for (int k = 0; k < nfield; ++k) {
float* uk = u + k*field_stride;
copy_subgrid(uk+lg, uk+l, ng, ny+2*ng, s);
copy_subgrid(uk+rg, uk+r, ng, ny+2*ng, s);
copy_subgrid(uk+tg, uk+t, nx+2*ng, ng, s);
copy_subgrid(uk+bg, uk+b, nx+2*ng, ng, s);
}
}
/**
* ### Derivatives with limiters
*
* In order to advance the time step, we also need to estimate
* derivatives of the fluxes and the solution values at each cell.
* In order to maintain stability, we apply a limiter here.
*
 * The minmod limiter *looks* like it should be expensive to compute,
* since superficially it seems to require a number of branches.
* We do something a little tricky, getting rid of the condition
* on the sign of the arguments using the `copysign` instruction.
* If the compiler does the "right" thing with `max` and `min`
* for floating point arguments (translating them to branch-free
* intrinsic operations), this implementation should be relatively fast.
*/
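//
// Written out, the nested xmin2s calls in limdiff below reduce to the
// familiar MC limiter (a restatement, not a new formula):
//
//     limdiff(um, u0, up) = minmod( theta*(u0-um), theta*(up-u0), (up-um)/2 )
//
// with theta = 2.  The copysign sum vanishes whenever its two arguments
// differ in sign, so the limited slope is zero at a local extremum.  As a
// quick numeric check, limdiff(1.0, 1.5, 4.0) = min(2*0.5, 2*2.5, 0.5*3.0)
// = 1.0.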
// Branch-free computation of minmod of two numbers times 2s
__host__ __device__ static inline
float xmin2s(float s, float a, float b) {
float sa = copysignf(s, a);
float sb = copysignf(s, b);
float abs_a = fabsf(a);
float abs_b = fabsf(b);
float min_abs = (abs_a < abs_b ? abs_a : abs_b);
return (sa+sb) * min_abs;
}
// Limited combined slope estimate
__host__ __device__ static inline
float limdiff(float um, float u0, float up) {
const float theta = 2.0;
const float quarter = 0.25;
float du1 = u0-um; // Difference to left
float du2 = up-u0; // Difference to right
float duc = up-um; // Twice centered difference
return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}
// Compute limited derivs
__host__ static inline
void limited_deriv1(float* restrict du,
const float* restrict u,
int ncell)
{
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i-1], u[i], u[i+1]);
}
// Compute limited derivs across stride
__host__ static inline
void limited_derivk(float* restrict du,
const float* restrict u,
int ncell, int stride)
{
assert(stride > 0);
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i-stride], u[i], u[i+stride]);
}
/**
* ### Advancing a time step
*
* Take one step of the numerical scheme. This consists of two pieces:
 * a first-order predictor computed at a half time step, which is used
* to obtain new $F$ and $G$ values; and a corrector step that computes
* the solution at the full step. For full details, we refer to the
* [Jiang and Tadmor paper][jt].
*
* The `compute_step` function takes two arguments: the `io` flag
* which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
* flag, which actually determines the time step length. We need
* to know the even-vs-odd distinction because the Jiang-Tadmor
* scheme alternates between a primary grid (on even steps) and a
* staggered grid (on odd steps). This means that the data at $(i,j)$
* in an even step and the data at $(i,j)$ in an odd step represent
* values at different locations in space, offset by half a space step
* in each direction. Every other step, we shift things back by one
* mesh cell in each direction, essentially resetting to the primary
* indexing scheme.
*
* We're slightly tricky in the corrector in that we write
* $$
* v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
* $$
* where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
* update formula, and $d(i,j)$ the $y$-derivative terms. This cuts
* the arithmetic cost a little (not that it's that big to start).
* It also makes it more obvious that we only need four rows worth
* of scratch space.
*/
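//
// Reading the corrector below back into index form (purely a restatement of
// central2d_correct_sd and the (s1+s0)-(d1-d0) combination, with f and g
// holding the half-step flux values):
//
//   v(i,j) = 1/4  * ( u(i,j) + u(i+1,j) + u(i,j+1) + u(i+1,j+1) )
//          + 1/16 * ( ux(i,j) - ux(i+1,j) + ux(i,j+1) - ux(i+1,j+1) )
//          - 1/16 * ( uy(i,j+1) + uy(i+1,j+1) - uy(i,j) - uy(i+1,j) )
//          + dt/(2*dx) * ( f(i,j) - f(i+1,j) + f(i,j+1) - f(i+1,j+1) )
//          - dt/(2*dy) * ( g(i,j+1) + g(i+1,j+1) - g(i,j) - g(i+1,j) )
//
// where s(i,j) collects the u, ux, and f groups and d(i,j) collects the uy
// and g groups.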
// Predictor half-step
// Number of thread ny-2, nx-2
__global__ static
void central2d_predict_cuda(
float* restrict dev_v,
float* restrict dev_scratch,
const float* restrict dev_u,
const float* restrict dev_f,
const float* restrict dev_g,
float* dev_dtcdx2, float* dev_dtcdy2,
int* dev_nx, int* dev_ny,
int* dev_k)
{
float dtcdx2 = *dev_dtcdx2;
float dtcdy2 = *dev_dtcdy2;
int nx = *dev_nx;
int ny = *dev_ny;
int k = *dev_k;
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
const unsigned int tid = ((gridDim.x * blockDim.x) * idy) + idx;
int iy = tid / (nx-2) + 1;
int ix = tid % (nx-2) + 1;
int offset = (k*ny+iy)*nx;
float fx = limdiff(dev_f[ix-1+offset], dev_f[ix+offset], dev_f[ix+1+offset]);
float gy = limdiff(dev_g[ix-nx+offset], dev_g[ix+offset], dev_g[ix+nx+offset]);
int offset_ix = (k*ny+iy)*nx+ix;
dev_v[offset_ix] = dev_u[offset_ix] - dtcdx2 * fx - dtcdy2 * gy;
    // Caution! Unlike the serial code, we only update scratch at the end
if (iy == ny-2) {
dev_scratch[ix] = fx;
dev_scratch[nx + ix] = gy;
}
}
static
void central2d_predict(float* restrict dev_v,
float* restrict dev_scratch,
const float* restrict dev_u,
const float* restrict dev_f,
const float* restrict dev_g,
float* dev_dtcdx2, float* dev_dtcdy2,
int* dev_nx, int* dev_ny,
int nfield, int nx, int ny)
{
int *dev_k;
hipMalloc((void**)&dev_k, sizeof(int));
for (int k = 0; k < nfield; ++k) {
hipMemcpy(dev_k, &k, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( central2d_predict_cuda), dim3(ny-2), dim3(nx-2), 0, 0,
dev_v,
dev_scratch,
dev_u,
dev_f,
dev_g,
dev_dtcdx2, dev_dtcdy2,
dev_nx, dev_ny,
dev_k
);
}
hipFree(dev_k);
}
// Expose for test purpose
extern "C"
void central2d_predict_wrapper(
float* restrict dev_v,
float* restrict dev_scratch,
const float* restrict dev_u,
const float* restrict dev_f,
const float* restrict dev_g,
float* dev_dtcdx2, float* dev_dtcdy2,
int* dev_nx, int* dev_ny,
int nfield, int nx, int ny)
{
central2d_predict(
dev_v,
dev_scratch,
dev_u,
dev_f,
dev_g,
dev_dtcdx2, dev_dtcdy2,
dev_nx,dev_ny,
nfield, nx, ny
);
}
// Corrector
__host__ static
void central2d_correct_sd(float* restrict s,
float* restrict d,
const float* restrict ux,
const float* restrict uy,
const float* restrict u,
const float* restrict f,
const float* restrict g,
float dtcdx2, float dtcdy2,
int xlo, int xhi)
{
for (int ix = xlo; ix < xhi; ++ix)
s[ix] =
0.2500f * (u [ix] + u [ix+1]) +
0.0625f * (ux[ix] - ux[ix+1]) +
dtcdx2 * (f [ix] - f [ix+1]);
for (int ix = xlo; ix < xhi; ++ix)
d[ix] =
0.0625f * (uy[ix] + uy[ix+1]) +
dtcdy2 * (g [ix] + g [ix+1]);
}
// Corrector
__host__ static
void central2d_correct(float* restrict v,
float* restrict scratch,
const float* restrict u,
const float* restrict f,
const float* restrict g,
float dtcdx2, float dtcdy2,
int xlo, int xhi, int ylo, int yhi,
int nx, int ny, int nfield)
{
assert(0 <= xlo && xlo < xhi && xhi <= nx);
assert(0 <= ylo && ylo < yhi && yhi <= ny);
float* restrict ux = scratch;
float* restrict uy = scratch + nx;
float* restrict s0 = scratch + 2*nx;
float* restrict d0 = scratch + 3*nx;
float* restrict s1 = scratch + 4*nx;
float* restrict d1 = scratch + 5*nx;
for (int k = 0; k < nfield; ++k) {
float* restrict vk = v + k*ny*nx;
const float* restrict uk = u + k*ny*nx;
const float* restrict fk = f + k*ny*nx;
const float* restrict gk = g + k*ny*nx;
limited_deriv1(ux+1, uk+ylo*nx+1, nx-2);
limited_derivk(uy+1, uk+ylo*nx+1, nx-2, nx);
central2d_correct_sd(s1, d1, ux, uy,
uk + ylo*nx, fk + ylo*nx, gk + ylo*nx,
dtcdx2, dtcdy2, xlo, xhi);
for (int iy = ylo; iy < yhi; ++iy) {
float* tmp;
tmp = s0; s0 = s1; s1 = tmp;
tmp = d0; d0 = d1; d1 = tmp;
limited_deriv1(ux+1, uk+(iy+1)*nx+1, nx-2);
limited_derivk(uy+1, uk+(iy+1)*nx+1, nx-2, nx);
central2d_correct_sd(s1, d1, ux, uy,
uk + (iy+1)*nx, fk + (iy+1)*nx, gk + (iy+1)*nx,
dtcdx2, dtcdy2, xlo, xhi);
for (int ix = xlo; ix < xhi; ++ix)
vk[iy*nx+ix] = (s1[ix]+s0[ix])-(d1[ix]-d0[ix]);
}
}
}
static
void central2d_step(float* restrict u,
float* restrict v,
float* restrict scratch,
float* restrict f,
float* restrict g,
float* dev_dtcdx2,
float* dev_dtcdy2,
int* dev_nx,
int* dev_ny,
int io, int nx, int ny, int ng,
int nfield, flux_t flux, speed_t speed,
float dt, float dx, float dy)
{
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
float dtcdx2 = 0.5 * dt / dx;
float dtcdy2 = 0.5 * dt / dy;
// Run on GPU, change dev_f and dev_g
flux(f, g, u, nx_all, ny_all, nx_all * ny_all);
hipMemcpy(dev_dtcdx2, &dtcdx2, sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_dtcdy2, &dtcdy2, sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_nx, &nx_all, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_ny, &ny_all, sizeof(int), hipMemcpyHostToDevice);
// Run on GPU, change dev_v and dev_scratch
central2d_predict(
v,
scratch,
u,
f,
g,
dev_dtcdx2,dev_dtcdy2,
dev_nx,dev_ny,
nfield, nx_all, ny_all
);
// Flux values of f and g at half step
for (int iy = 1; iy < ny_all-1; ++iy) {
int jj = iy*nx_all+1;
// Run on GPU, change dev_f and dev_g
flux(f+jj, g+jj, v+jj, 1, nx_all-2, nx_all * ny_all);
}
    // Unified Memory requires that no host thread touch a managed data
    // region after a kernel launch until an explicit hipDeviceSynchronize()
hipDeviceSynchronize();
// Run on CPU, change v and scratch
central2d_correct(v+io*(nx_all+1), scratch, u, f, g, dtcdx2, dtcdy2,
ng-io, nx+ng-io,
ng-io, ny+ng-io,
nx_all, ny_all, nfield);
}
/**
* ### Advance a fixed time
*
* The `run` method advances from time 0 (initial conditions) to time
* `tfinal`. Note that `run` can be called repeatedly; for example,
* we might want to advance for a period of time, write out a picture,
* advance more, and write another picture. In this sense, `tfinal`
* should be interpreted as an offset from the time represented by
* the simulator at the start of the call, rather than as an absolute time.
*
* We always take an even number of steps so that the solution
* at the end lives on the main grid instead of the staggered grid.
*/
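//
// A minimal usage sketch (illustrative only; `shallow2d_flux` and
// `shallow2d_speed` stand in for whatever flux_t / speed_t pair the driver
// supplies and are not defined in this file):
/*
    central2d_t* sim = central2d_init(2.0f, 2.0f, 200, 200,
                                      3, shallow2d_flux, shallow2d_speed,
                                      0.45f);
    // ... fill initial conditions via central2d_offset(sim, k, ix, iy) ...
    for (int frame = 0; frame < 10; ++frame) {
        central2d_run(sim, 0.1f);  // advance 0.1 time units past the current state
        // ... write the frame ...
    }
    central2d_free(sim);
*/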
static
int central2d_xrun(float* restrict u, float* restrict v,
float* restrict scratch,
float* restrict f,
float* restrict g,
int nx, int ny, int ng,
int nfield, flux_t flux, speed_t speed,
float tfinal, float dx, float dy, float cfl)
{
int nstep = 0;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
bool done = false;
float t = 0;
// for predict function only
float *dev_dtcdx2, *dev_dtcdy2;
int *dev_nx, *dev_ny;
hipMalloc( (void**)&dev_dtcdx2, sizeof(float) );
hipMalloc( (void**)&dev_dtcdy2, sizeof(float) );
hipMalloc( (void**)&dev_nx, sizeof(int) );
hipMalloc( (void**)&dev_ny, sizeof(int) );
// for speed function only
float *dev_cxy;
// hipMallocManaged( (void**)&cxy, 2*sizeof(float));
hipMalloc( (void**)&dev_cxy, 2*sizeof(float));
while (!done) {
float cxy[2] = {1.0e-15f, 1.0e-15f};
// Run on CPU, change u
central2d_periodic(u, nx, ny, ng, nfield); // CPU
hipMemcpy(dev_cxy, cxy, 2*sizeof(float), hipMemcpyHostToDevice);
// Run on GPU, change dev_cxy
speed(dev_cxy, u, nx_all, ny_all, nx_all * ny_all); // GPU
hipMemcpy(cxy, dev_cxy, 2*sizeof(float), hipMemcpyDeviceToHost);
// print_array(cxy, 2);
float dt = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
if (t + 2*dt >= tfinal) {
dt = (tfinal-t)/2;
done = true;
}
// Run on both CPU and GPU
central2d_step(u, v, scratch, f, g,
dev_dtcdx2, dev_dtcdy2, dev_nx, dev_ny,
0, nx+4, ny+4, ng-2,
nfield, flux, speed,
dt, dx, dy);
central2d_step(v, u, scratch, f, g,
dev_dtcdx2, dev_dtcdy2, dev_nx, dev_ny,
1, nx, ny, ng,
nfield, flux, speed,
dt, dx, dy);
t += 2*dt;
nstep += 2;
// print_array(u, nx_all * ny_all);
}
    // release the device-side step parameters along with the wave speed buffer
    hipFree(dev_dtcdx2);
    hipFree(dev_dtcdy2);
    hipFree(dev_nx);
    hipFree(dev_ny);
    hipFree(dev_cxy);
return nstep;
}
extern "C"
int central2d_run(central2d_t* sim, float tfinal)
{
return central2d_xrun(sim->u, sim->v, sim->scratch,
sim->f, sim->g,
sim->nx, sim->ny, sim->ng,
sim->nfield, sim->flux, sim->speed,
tfinal, sim->dx, sim->dy, sim->cfl);
}
| 56913b582a003bb6906a929726360661e7d8474a.cu | extern "C" {
#include "stepper.cuh"
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#ifndef RESTRICT
#define restrict __restrict__
#endif /* RESTRICT */
//ldoc on
/**
* ## Implementation
*
* ### Structure allocation
*/
void print_array(float* array, int len) {
for(int i = 0; i < len; i++) {
printf("%.2f ", array[i]);
}
printf("\n");
}
extern "C"
central2d_t* central2d_init(float w, float h, int nx, int ny,
int nfield, flux_t flux, speed_t speed,
float cfl)
{
// We extend to a four cell buffer to avoid BC comm on odd time steps
int ng = 4;
central2d_t* sim;
cudaMallocManaged(&sim, sizeof(central2d_t));
sim->nx = nx;
sim->ny = ny;
sim->ng = ng;
sim->nfield = nfield;
sim->dx = w/nx;
sim->dy = h/ny;
sim->flux = flux;
sim->speed = speed;
sim->cfl = cfl;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int nc = nx_all * ny_all;
int N = nfield * nc;
cudaMallocManaged(&sim->u, (4*N + 6*nx_all)* sizeof(float));
sim->v = sim->u + N;
sim->f = sim->u + 2*N;
sim->g = sim->u + 3*N;
sim->scratch = sim->u + 4*N;
return sim;
}
extern "C"
void central2d_free(central2d_t* sim)
{
cudaFree(sim->u);
cudaFree(sim);
}
extern "C"
int central2d_offset(central2d_t* sim, int k, int ix, int iy)
{
int nx = sim->nx, ny = sim->ny, ng = sim->ng;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
return (k*ny_all+(ng+iy))*nx_all+(ng+ix);
}
/**
* ### Boundary conditions
*
* In finite volume methods, boundary conditions are typically applied by
* setting appropriate values in ghost cells. For our framework, we will
* apply periodic boundary conditions; that is, waves that exit one side
* of the domain will enter from the other side.
*
* We apply the conditions by assuming that the cells with coordinates
 * `nghost <= ix < nx+nghost` and `nghost <= iy < ny+nghost` are
* "canonical", and setting the values for all other cells `(ix,iy)`
* to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
* integers `p` and `q`.
*/
static inline
void copy_subgrid(float* restrict dst,
const float* restrict src,
int nx, int ny, int stride)
{
for (int iy = 0; iy < ny; ++iy)
for (int ix = 0; ix < nx; ++ix)
dst[iy*stride+ix] = src[iy*stride+ix];
}
// Change u
extern "C"
void central2d_periodic(float* restrict u,
int nx, int ny, int ng, int nfield)
{
// Stride and number per field
int s = nx + 2*ng;
int field_stride = (ny+2*ng)*s;
// Offsets of left, right, top, and bottom data blocks and ghost blocks
int l = nx, lg = 0;
int r = ng, rg = nx+ng;
int b = ny*s, bg = 0;
    int t = ng*s,  tg = (ny+ng)*s;
// Copy data into ghost cells on each side
for (int k = 0; k < nfield; ++k) {
float* uk = u + k*field_stride;
copy_subgrid(uk+lg, uk+l, ng, ny+2*ng, s);
copy_subgrid(uk+rg, uk+r, ng, ny+2*ng, s);
copy_subgrid(uk+tg, uk+t, nx+2*ng, ng, s);
copy_subgrid(uk+bg, uk+b, nx+2*ng, ng, s);
}
}
/**
* ### Derivatives with limiters
*
* In order to advance the time step, we also need to estimate
* derivatives of the fluxes and the solution values at each cell.
* In order to maintain stability, we apply a limiter here.
*
 * The minmod limiter *looks* like it should be expensive to compute,
* since superficially it seems to require a number of branches.
* We do something a little tricky, getting rid of the condition
* on the sign of the arguments using the `copysign` instruction.
* If the compiler does the "right" thing with `max` and `min`
* for floating point arguments (translating them to branch-free
* intrinsic operations), this implementation should be relatively fast.
*/
// Branch-free computation of minmod of two numbers times 2s
__host__ __device__ static inline
float xmin2s(float s, float a, float b) {
float sa = copysignf(s, a);
float sb = copysignf(s, b);
float abs_a = fabsf(a);
float abs_b = fabsf(b);
float min_abs = (abs_a < abs_b ? abs_a : abs_b);
return (sa+sb) * min_abs;
}
// Limited combined slope estimate
__host__ __device__ static inline
float limdiff(float um, float u0, float up) {
const float theta = 2.0;
const float quarter = 0.25;
float du1 = u0-um; // Difference to left
float du2 = up-u0; // Difference to right
float duc = up-um; // Twice centered difference
return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}
// Compute limited derivs
__host__ static inline
void limited_deriv1(float* restrict du,
const float* restrict u,
int ncell)
{
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i-1], u[i], u[i+1]);
}
// Compute limited derivs across stride
__host__ static inline
void limited_derivk(float* restrict du,
const float* restrict u,
int ncell, int stride)
{
assert(stride > 0);
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i-stride], u[i], u[i+stride]);
}
/**
* ### Advancing a time step
*
* Take one step of the numerical scheme. This consists of two pieces:
 * a first-order predictor computed at a half time step, which is used
* to obtain new $F$ and $G$ values; and a corrector step that computes
* the solution at the full step. For full details, we refer to the
* [Jiang and Tadmor paper][jt].
*
* The `compute_step` function takes two arguments: the `io` flag
* which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
* flag, which actually determines the time step length. We need
* to know the even-vs-odd distinction because the Jiang-Tadmor
* scheme alternates between a primary grid (on even steps) and a
* staggered grid (on odd steps). This means that the data at $(i,j)$
* in an even step and the data at $(i,j)$ in an odd step represent
* values at different locations in space, offset by half a space step
* in each direction. Every other step, we shift things back by one
* mesh cell in each direction, essentially resetting to the primary
* indexing scheme.
*
* We're slightly tricky in the corrector in that we write
* $$
* v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
* $$
* where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
* update formula, and $d(i,j)$ the $y$-derivative terms. This cuts
* the arithmetic cost a little (not that it's that big to start).
* It also makes it more obvious that we only need four rows worth
* of scratch space.
*/
// Predictor half-step
// Number of thread ny-2, nx-2
__global__ static
void central2d_predict_cuda(
float* restrict dev_v,
float* restrict dev_scratch,
const float* restrict dev_u,
const float* restrict dev_f,
const float* restrict dev_g,
float* dev_dtcdx2, float* dev_dtcdy2,
int* dev_nx, int* dev_ny,
int* dev_k)
{
float dtcdx2 = *dev_dtcdx2;
float dtcdy2 = *dev_dtcdy2;
int nx = *dev_nx;
int ny = *dev_ny;
int k = *dev_k;
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
const unsigned int tid = ((gridDim.x * blockDim.x) * idy) + idx;
int iy = tid / (nx-2) + 1;
int ix = tid % (nx-2) + 1;
int offset = (k*ny+iy)*nx;
float fx = limdiff(dev_f[ix-1+offset], dev_f[ix+offset], dev_f[ix+1+offset]);
float gy = limdiff(dev_g[ix-nx+offset], dev_g[ix+offset], dev_g[ix+nx+offset]);
int offset_ix = (k*ny+iy)*nx+ix;
dev_v[offset_ix] = dev_u[offset_ix] - dtcdx2 * fx - dtcdy2 * gy;
    // Caution! Unlike the serial code, we only update scratch at the end
if (iy == ny-2) {
dev_scratch[ix] = fx;
dev_scratch[nx + ix] = gy;
}
}
static
void central2d_predict(float* restrict dev_v,
float* restrict dev_scratch,
const float* restrict dev_u,
const float* restrict dev_f,
const float* restrict dev_g,
float* dev_dtcdx2, float* dev_dtcdy2,
int* dev_nx, int* dev_ny,
int nfield, int nx, int ny)
{
int *dev_k;
cudaMalloc((void**)&dev_k, sizeof(int));
for (int k = 0; k < nfield; ++k) {
cudaMemcpy(dev_k, &k, sizeof(int), cudaMemcpyHostToDevice);
central2d_predict_cuda<<<ny-2, nx-2>>>(
dev_v,
dev_scratch,
dev_u,
dev_f,
dev_g,
dev_dtcdx2, dev_dtcdy2,
dev_nx, dev_ny,
dev_k
);
}
cudaFree(dev_k);
}
// Expose for test purpose
extern "C"
void central2d_predict_wrapper(
float* restrict dev_v,
float* restrict dev_scratch,
const float* restrict dev_u,
const float* restrict dev_f,
const float* restrict dev_g,
float* dev_dtcdx2, float* dev_dtcdy2,
int* dev_nx, int* dev_ny,
int nfield, int nx, int ny)
{
central2d_predict(
dev_v,
dev_scratch,
dev_u,
dev_f,
dev_g,
dev_dtcdx2, dev_dtcdy2,
dev_nx,dev_ny,
nfield, nx, ny
);
}
// Corrector
__host__ static
void central2d_correct_sd(float* restrict s,
float* restrict d,
const float* restrict ux,
const float* restrict uy,
const float* restrict u,
const float* restrict f,
const float* restrict g,
float dtcdx2, float dtcdy2,
int xlo, int xhi)
{
for (int ix = xlo; ix < xhi; ++ix)
s[ix] =
0.2500f * (u [ix] + u [ix+1]) +
0.0625f * (ux[ix] - ux[ix+1]) +
dtcdx2 * (f [ix] - f [ix+1]);
for (int ix = xlo; ix < xhi; ++ix)
d[ix] =
0.0625f * (uy[ix] + uy[ix+1]) +
dtcdy2 * (g [ix] + g [ix+1]);
}
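/*
 * A small algebraic note on the split (our gloss): the update below is
 * vk = (s1 + s0) - (d1 - d0) = (s0 + s1) + (d0 - d1), so the u, ux and f
 * contributions of the two adjacent rows are summed, while the uy and g
 * contributions are differenced across the rows; that difference is the
 * y-direction analogue of the (f[ix] - f[ix+1]) differences formed inside
 * central2d_correct_sd.
 */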
// Corrector
__host__ static
void central2d_correct(float* restrict v,
float* restrict scratch,
const float* restrict u,
const float* restrict f,
const float* restrict g,
float dtcdx2, float dtcdy2,
int xlo, int xhi, int ylo, int yhi,
int nx, int ny, int nfield)
{
assert(0 <= xlo && xlo < xhi && xhi <= nx);
assert(0 <= ylo && ylo < yhi && yhi <= ny);
float* restrict ux = scratch;
float* restrict uy = scratch + nx;
float* restrict s0 = scratch + 2*nx;
float* restrict d0 = scratch + 3*nx;
float* restrict s1 = scratch + 4*nx;
float* restrict d1 = scratch + 5*nx;
for (int k = 0; k < nfield; ++k) {
float* restrict vk = v + k*ny*nx;
const float* restrict uk = u + k*ny*nx;
const float* restrict fk = f + k*ny*nx;
const float* restrict gk = g + k*ny*nx;
limited_deriv1(ux+1, uk+ylo*nx+1, nx-2);
limited_derivk(uy+1, uk+ylo*nx+1, nx-2, nx);
central2d_correct_sd(s1, d1, ux, uy,
uk + ylo*nx, fk + ylo*nx, gk + ylo*nx,
dtcdx2, dtcdy2, xlo, xhi);
for (int iy = ylo; iy < yhi; ++iy) {
float* tmp;
tmp = s0; s0 = s1; s1 = tmp;
tmp = d0; d0 = d1; d1 = tmp;
limited_deriv1(ux+1, uk+(iy+1)*nx+1, nx-2);
limited_derivk(uy+1, uk+(iy+1)*nx+1, nx-2, nx);
central2d_correct_sd(s1, d1, ux, uy,
uk + (iy+1)*nx, fk + (iy+1)*nx, gk + (iy+1)*nx,
dtcdx2, dtcdy2, xlo, xhi);
for (int ix = xlo; ix < xhi; ++ix)
vk[iy*nx+ix] = (s1[ix]+s0[ix])-(d1[ix]-d0[ix]);
}
}
}
static
void central2d_step(float* restrict u,
float* restrict v,
float* restrict scratch,
float* restrict f,
float* restrict g,
float* dev_dtcdx2,
float* dev_dtcdy2,
int* dev_nx,
int* dev_ny,
int io, int nx, int ny, int ng,
int nfield, flux_t flux, speed_t speed,
float dt, float dx, float dy)
{
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
float dtcdx2 = 0.5 * dt / dx;
float dtcdy2 = 0.5 * dt / dy;
// Run on GPU, change dev_f and dev_g
flux(f, g, u, nx_all, ny_all, nx_all * ny_all);
cudaMemcpy(dev_dtcdx2, &dtcdx2, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dtcdy2, &dtcdy2, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_nx, &nx_all, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_ny, &ny_all, sizeof(int), cudaMemcpyHostToDevice);
// Run on GPU, change dev_v and dev_scratch
central2d_predict(
v,
scratch,
u,
f,
g,
dev_dtcdx2,dev_dtcdy2,
dev_nx,dev_ny,
nfield, nx_all, ny_all
);
// Flux values of f and g at half step
for (int iy = 1; iy < ny_all-1; ++iy) {
int jj = iy*nx_all+1;
// Run on GPU, change dev_f and dev_g
flux(f+jj, g+jj, v+jj, 1, nx_all-2, nx_all * ny_all);
}
// Unified Memory requires that no host thread touch a managed data
// region after a kernel launch until an explicit cudaDeviceSynchronize()
cudaDeviceSynchronize();
// Run on CPU, change v and scratch
central2d_correct(v+io*(nx_all+1), scratch, u, f, g, dtcdx2, dtcdy2,
ng-io, nx+ng-io,
ng-io, ny+ng-io,
nx_all, ny_all, nfield);
}
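/*
 * The cudaMemcpy calls and kernel launches in central2d_step above never
 * check their return codes. A minimal sketch of a check they could be
 * wrapped in is below; the macro name is ours and it is not used elsewhere
 * in this file. It relies only on assert(), which this file already uses;
 * note that assert() compiles away under NDEBUG, so a production version
 * would test the error code explicitly instead.
 */
#define CENTRAL2D_CHECK_CUDA(call) assert((call) == cudaSuccess)
/* e.g. CENTRAL2D_CHECK_CUDA(cudaMemcpy(dev_nx, &nx_all, sizeof(int), cudaMemcpyHostToDevice)); */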
/**
* ### Advance a fixed time
*
* The `run` method advances from time 0 (initial conditions) to time
* `tfinal`. Note that `run` can be called repeatedly; for example,
* we might want to advance for a period of time, write out a picture,
* advance more, and write another picture. In this sense, `tfinal`
* should be interpreted as an offset from the time represented by
* the simulator at the start of the call, rather than as an absolute time.
*
* We always take an even number of steps so that the solution
* at the end lives on the main grid instead of the staggered grid.
*/
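/*
 * Worked example of the step-halving below (numbers made up): if t = 0.9,
 * tfinal = 1.0 and the CFL-limited step is dt = 0.08, then t + 2*dt = 1.06
 * >= tfinal, so dt is reset to (tfinal - t)/2 = 0.05 and the final pair of
 * steps lands exactly on tfinal with the solution back on the main grid.
 */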
static
int central2d_xrun(float* restrict u, float* restrict v,
float* restrict scratch,
float* restrict f,
float* restrict g,
int nx, int ny, int ng,
int nfield, flux_t flux, speed_t speed,
float tfinal, float dx, float dy, float cfl)
{
int nstep = 0;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
bool done = false;
float t = 0;
// for predict function only
float *dev_dtcdx2, *dev_dtcdy2;
int *dev_nx, *dev_ny;
cudaMalloc( (void**)&dev_dtcdx2, sizeof(float) );
cudaMalloc( (void**)&dev_dtcdy2, sizeof(float) );
cudaMalloc( (void**)&dev_nx, sizeof(int) );
cudaMalloc( (void**)&dev_ny, sizeof(int) );
// for speed function only
float *dev_cxy;
// cudaMallocManaged( (void**)&cxy, 2*sizeof(float));
cudaMalloc( (void**)&dev_cxy, 2*sizeof(float));
while (!done) {
float cxy[2] = {1.0e-15f, 1.0e-15f};
// Run on CPU, change u
central2d_periodic(u, nx, ny, ng, nfield); // CPU
cudaMemcpy(dev_cxy, cxy, 2*sizeof(float), cudaMemcpyHostToDevice);
// Run on GPU, change dev_cxy
speed(dev_cxy, u, nx_all, ny_all, nx_all * ny_all); // GPU
cudaMemcpy(cxy, dev_cxy, 2*sizeof(float), cudaMemcpyDeviceToHost);
// print_array(cxy, 2);
float dt = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
if (t + 2*dt >= tfinal) {
dt = (tfinal-t)/2;
done = true;
}
// Run on both CPU and GPU
central2d_step(u, v, scratch, f, g,
dev_dtcdx2, dev_dtcdy2, dev_nx, dev_ny,
0, nx+4, ny+4, ng-2,
nfield, flux, speed,
dt, dx, dy);
central2d_step(v, u, scratch, f, g,
dev_dtcdx2, dev_dtcdy2, dev_nx, dev_ny,
1, nx, ny, ng,
nfield, flux, speed,
dt, dx, dy);
t += 2*dt;
nstep += 2;
// print_array(u, nx_all * ny_all);
}
cudaFree(dev_dtcdx2);
cudaFree(dev_dtcdy2);
cudaFree(dev_nx);
cudaFree(dev_ny);
cudaFree(dev_cxy);
return nstep;
}
extern "C"
int central2d_run(central2d_t* sim, float tfinal)
{
return central2d_xrun(sim->u, sim->v, sim->scratch,
sim->f, sim->g,
sim->nx, sim->ny, sim->ng,
sim->nfield, sim->flux, sim->speed,
tfinal, sim->dx, sim->dy, sim->cfl);
}
|
df5b9c233a460098b98ded513e827541a7811057.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "getwtw.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const double *dWU = NULL;
hipMalloc(&dWU, XSIZE*YSIZE);
double *wtw = NULL;
hipMalloc(&wtw, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(getwtw, dim3(gridBlock), dim3(threadBlock), 0, 0, Params, dWU, wtw);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(getwtw, dim3(gridBlock), dim3(threadBlock), 0, 0, Params, dWU, wtw);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(getwtw, dim3(gridBlock), dim3(threadBlock), 0, 0, Params, dWU, wtw);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | df5b9c233a460098b98ded513e827541a7811057.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "getwtw.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
cudaMalloc(&Params, XSIZE*YSIZE);
const double *dWU = NULL;
cudaMalloc(&dWU, XSIZE*YSIZE);
double *wtw = NULL;
cudaMalloc(&wtw, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
getwtw<<<gridBlock,threadBlock>>>(Params,dWU,wtw);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
getwtw<<<gridBlock,threadBlock>>>(Params,dWU,wtw);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
getwtw<<<gridBlock,threadBlock>>>(Params,dWU,wtw);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5a263bc2250186ea936c59bd94f80efe33c4c5b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <time.h>
#include <random>
#include <vector>
#include <fstream>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "timer.h"
#include "cuda_error_check.h"
#include "alloc.h"
#include "reduce_block_1d.h"
#include "complex.h"
//#include "special_functions.cuh"
#include "parameters.h"
#include "index.h"
#include "tune.h"
#include "array.h"
using namespace std;
namespace U1{
template<bool EO_TO_NO_ORDER>
__global__ void kernel_convert_EO_NO(const double *in, double *out){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= Volume() ) return;
if(EO_TO_NO_ORDER){
int parity = 0;
if( id >= HalfVolume() ){
parity = 1;
id -= HalfVolume();
}
int x[4];
indexEO(id, parity, x);
size_t idx = indexId(x);
for(int dir = 0; dir < Dirs(); dir++){
out[idx + dir * Volume()] = in[id + parity * HalfVolume() + dir * Volume()];
}
}
else{
int x[4];
indexNO(id, x);
size_t idx = indexId(x) >> 1;
int parity = GetParity(x);
for(int dir = 0; dir < Dirs(); dir++){
out[id + parity * HalfVolume() + dir * Volume()] = in[idx + dir * Volume()];
}
}
}
template<bool EO_TO_NO_ORDER>
class ConvLattice_EO_NO: Tunable{
public:
private:
Array<double>* lat;
Array<double>* latno;
int size;
double timesec;
#ifdef TIMMINGS
Timer time;
#endif
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( kernel_convert_EO_NO<EO_TO_NO_ORDER>), dim3(tp.grid), dim3(tp.block), 0, stream, lat->getPtr(), latno->getPtr());
}
public:
ConvLattice_EO_NO(Array<double>* lat) : lat(lat) {
size = Volume();
latno = new Array<double>(Device, Dirs()*size);
timesec = 0.0;
}
~ConvLattice_EO_NO(){ };
Array<double>* Run(const hipStream_t &stream){
#ifdef TIMMINGS
time.start();
#endif
apply(stream);
cudaDevSync();
cudaCheckError("Kernel execution failed");
#ifdef TIMMINGS
cudaDevSync( );
time.stop();
timesec = time.getElapsedTimeInSec();
#endif
return latno;
}
Array<double>* Run(){ return Run(0); }
double flops(){ return ((double)flop() * 1.0e-9) / timesec;}
double bandwidth(){ return (double)bytes() / (timesec * (double)(1 << 30));}
long long flop() const { return 0;}
long long bytes() const{ return 0;}
double time(){ return timesec;}
void stat(){ cout << "OverRelaxation: " << time() << " s\t" << bandwidth() << " GB/s\t" << flops() << " GFlops" << endl;}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << PARAMS::Grid[0] << "x";
vol << PARAMS::Grid[1] << "x";
vol << PARAMS::Grid[2] << "x";
vol << PARAMS::Grid[3];
aux << "threads=" << size;
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam &param) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune() { }
void postTune() { }
};
Array<double>* LatticeConvert(Array<double>* lat, bool eo_to_no){
if(eo_to_no){
ConvLattice_EO_NO<true> cv(lat);
return cv.Run();
}
else{
ConvLattice_EO_NO<false> cv(lat);
return cv.Run();
}
}
}
| 5a263bc2250186ea936c59bd94f80efe33c4c5b3.cu | #include <iostream>
#include <math.h>
#include <time.h>
#include <random>
#include <vector>
#include <fstream>
#include <omp.h>
#include <cuda.h>
#include <curand_kernel.h>
#include "timer.h"
#include "cuda_error_check.h"
#include "alloc.h"
#include "reduce_block_1d.h"
#include "complex.h"
//#include "special_functions.cuh"
#include "parameters.h"
#include "index.h"
#include "tune.h"
#include "array.h"
using namespace std;
namespace U1{
template<bool EO_TO_NO_ORDER>
__global__ void kernel_convert_EO_NO(const double *in, double *out){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= Volume() ) return;
if(EO_TO_NO_ORDER){
int parity = 0;
if( id >= HalfVolume() ){
parity = 1;
id -= HalfVolume();
}
int x[4];
indexEO(id, parity, x);
size_t idx = indexId(x);
for(int dir = 0; dir < Dirs(); dir++){
out[idx + dir * Volume()] = in[id + parity * HalfVolume() + dir * Volume()];
}
}
else{
int x[4];
indexNO(id, x);
size_t idx = indexId(x) >> 1;
int parity = GetParity(x);
for(int dir = 0; dir < Dirs(); dir++){
out[id + parity * HalfVolume() + dir * Volume()] = in[idx + dir * Volume()];
}
}
}
template<bool EO_TO_NO_ORDER>
class ConvLattice_EO_NO: Tunable{
public:
private:
Array<double>* lat;
Array<double>* latno;
int size;
double timesec;
#ifdef TIMMINGS
Timer time;
#endif
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_convert_EO_NO<EO_TO_NO_ORDER><<<tp.grid, tp.block, 0, stream>>>(lat->getPtr(), latno->getPtr());
}
public:
ConvLattice_EO_NO(Array<double>* lat) : lat(lat) {
size = Volume();
latno = new Array<double>(Device, Dirs()*size);
timesec = 0.0;
}
~ConvLattice_EO_NO(){ };
Array<double>* Run(const cudaStream_t &stream){
#ifdef TIMMINGS
time.start();
#endif
apply(stream);
cudaDevSync();
cudaCheckError("Kernel execution failed");
#ifdef TIMMINGS
cudaDevSync( );
time.stop();
timesec = time.getElapsedTimeInSec();
#endif
return latno;
}
Array<double>* Run(){ return Run(0); }
double flops(){ return ((double)flop() * 1.0e-9) / timesec;}
double bandwidth(){ return (double)bytes() / (timesec * (double)(1 << 30));}
long long flop() const { return 0;}
long long bytes() const{ return 0;}
double time(){ return timesec;}
void stat(){ cout << "OverRelaxation: " << time() << " s\t" << bandwidth() << " GB/s\t" << flops() << " GFlops" << endl;}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << PARAMS::Grid[0] << "x";
vol << PARAMS::Grid[1] << "x";
vol << PARAMS::Grid[2] << "x";
vol << PARAMS::Grid[3];
aux << "threads=" << size;
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam &param) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune() { }
void postTune() { }
};
Array<double>* LatticeConvert(Array<double>* lat, bool eo_to_no){
if(eo_to_no){
ConvLattice_EO_NO<true> cv(lat);
return cv.Run();
}
else{
ConvLattice_EO_NO<false> cv(lat);
return cv.Run();
}
}
}
|
962b096ebc5121ee8e3f8c232a34e21be6378ee2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlag2c_sparse.cu mixed zc -> ds, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
#include "magmasparse_z.h"
#include "magmasparse_ds.h"
#include "magma.h"
#include "mmio.h"
#include "common_magma.h"
#define PRECISION_d
#define BLOCKSIZE 512
// TODO get rid of global variable!
__device__ int flag = 0;
__global__ void
magmaint_dlag2s_sparse( int M, int N,
const double *A,
float *SA ){
int thread_id = blockDim.x * blockIdx.x + threadIdx.x ;
// global thread index
if( thread_id < M ){
for( int i=0; i<N; i++ ){
SA[i*M+thread_id] = (float)( A[i*M+thread_id] );
}
}
}
/**
Purpose
-------
DLAG2S converts a DOUBLE PRECISION matrix A to a SINGLE PRECISION
matrix SA.
RMAX is the overflow for the SINGLE PRECISION arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
M INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
N INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,N)
On entry, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,N)
On exit, if INFO=0, the M-by-N coefficient matrix SA; if
INFO>0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA in exit is unspecified.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" void
magmablas_dlag2s_sparse( magma_int_t M, magma_int_t N,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info )
{
/*
(TODO note from original dense source)
Note
----
- We have to provide INFO at the end that dlag2s isn't doable now.
- Transfer a single value TO/FROM CPU/GPU
- SLAMCH that's needed is called from underlying BLAS
- Only used in iterative refinement
- Do we want to provide this in the release?
*/
*info = 0;
if ( M < 0 )
*info = -1;
else if ( N < 0 )
*info = -2;
else if ( lda < max(1,M) )
*info = -4;
else if ( ldsa < max(1,M) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
//return *info;
}
dim3 grid( (M+BLOCKSIZE-1)/BLOCKSIZE, 1, 1);
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
hipLaunchKernelGGL(( magmaint_dlag2s_sparse), dim3(grid), dim3(BLOCKSIZE), 0, magma_stream ,
M, N, A, SA ) ;
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
__global__ void
magma_dlag2s_CSR_DENSE_kernel( int num_rows, int num_cols,
double *Aval, magma_index_t *Arow,
magma_index_t *Acol, float *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_S_MAKE(0.0, 0.0);
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = (float)( Aval[ j] );
}
}
__global__ void
magma_dlag2s_CSR_DENSE_kernel_1( int num_rows, int num_cols,
float *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_S_MAKE(0.0, 0.0);
}
}
__global__ void
magma_dlag2s_CSR_DENSE_kernel_2( int num_rows, int num_cols,
double *Aval, magma_index_t *Arow,
magma_index_t *Acol, float *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = (float)( Aval[ j] );
}
}
extern "C" void
magma_dlag2s_CSR_DENSE( magma_d_sparse_matrix A,
magma_s_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_smalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
hipLaunchKernelGGL(( magma_dlag2s_CSR_DENSE_kernel), dim3(Bs), dim3(Gs), 0, magma_stream ,
A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
extern "C" void
magma_dlag2s_CSR_DENSE_alloc( magma_d_sparse_matrix A,
magma_s_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_smalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
hipLaunchKernelGGL(( magma_dlag2s_CSR_DENSE_kernel_1), dim3(Bs), dim3(Gs), 0, magma_stream ,
A.num_rows, A.num_cols, B->val );
}
}
extern "C" void
magma_dlag2s_CSR_DENSE_convert( magma_d_sparse_matrix A,
magma_s_sparse_matrix *B ){
if( B->memory_location == Magma_DEV && B->storage_type == Magma_DENSE){
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
hipLaunchKernelGGL(( magma_dlag2s_CSR_DENSE_kernel_2), dim3(Bs), dim3(Gs), 0, magma_stream ,
A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
| 962b096ebc5121ee8e3f8c232a34e21be6378ee2.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlag2c_sparse.cu mixed zc -> ds, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
#include "magmasparse_z.h"
#include "magmasparse_ds.h"
#include "magma.h"
#include "mmio.h"
#include "common_magma.h"
#define PRECISION_d
#define BLOCKSIZE 512
// TODO get rid of global variable!
__device__ int flag = 0;
__global__ void
magmaint_dlag2s_sparse( int M, int N,
const double *A,
float *SA ){
int thread_id = blockDim.x * blockIdx.x + threadIdx.x ;
// global thread index
if( thread_id < M ){
for( int i=0; i<N; i++ ){
SA[i*M+thread_id] = (float)( A[i*M+thread_id] );
}
}
}
/**
Purpose
-------
DLAG2S converts a DOUBLE PRECISION matrix A to a SINGLE PRECISION
matrix SA.
RMAX is the overflow for the SINGLE PRECISION arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
M INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
N INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,N)
On entry, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,N)
On exit, if INFO=0, the M-by-N coefficient matrix SA; if
INFO>0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA in exit is unspecified.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" void
magmablas_dlag2s_sparse( magma_int_t M, magma_int_t N,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info )
{
/*
(TODO note from original dense source)
Note
----
- We have to provide INFO at the end that dlag2s isn't doable now.
- Transfer a single value TO/FROM CPU/GPU
- SLAMCH that's needed is called from underlying BLAS
- Only used in iterative refinement
- Do we want to provide this in the release?
*/
*info = 0;
if ( M < 0 )
*info = -1;
else if ( N < 0 )
*info = -2;
else if ( lda < max(1,M) )
*info = -4;
else if ( ldsa < max(1,M) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
//return *info;
}
dim3 grid( (M+BLOCKSIZE-1)/BLOCKSIZE, 1, 1);
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
magmaint_dlag2s_sparse<<< grid, BLOCKSIZE, 0, magma_stream >>>
( M, N, A, SA ) ;
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
__global__ void
magma_dlag2s_CSR_DENSE_kernel( int num_rows, int num_cols,
double *Aval, magma_index_t *Arow,
magma_index_t *Acol, float *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_S_MAKE(0.0, 0.0);
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = (float)( Aval[ j] );
}
}
__global__ void
magma_dlag2s_CSR_DENSE_kernel_1( int num_rows, int num_cols,
float *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_S_MAKE(0.0, 0.0);
}
}
__global__ void
magma_dlag2s_CSR_DENSE_kernel_2( int num_rows, int num_cols,
double *Aval, magma_index_t *Arow,
magma_index_t *Acol, float *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = (float)( Aval[ j] );
}
}
extern "C" void
magma_dlag2s_CSR_DENSE( magma_d_sparse_matrix A,
magma_s_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_smalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
magma_dlag2s_CSR_DENSE_kernel<<< Bs, Gs, 0, magma_stream >>>
( A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
extern "C" void
magma_dlag2s_CSR_DENSE_alloc( magma_d_sparse_matrix A,
magma_s_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_smalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
magma_dlag2s_CSR_DENSE_kernel_1<<< Bs, Gs, 0, magma_stream >>>
( A.num_rows, A.num_cols, B->val );
}
}
extern "C" void
magma_dlag2s_CSR_DENSE_convert( magma_d_sparse_matrix A,
magma_s_sparse_matrix *B ){
if( B->memory_location == Magma_DEV && B->storage_type == Magma_DENSE){
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
magma_dlag2s_CSR_DENSE_kernel_2<<< Bs, Gs, 0, magma_stream >>>
( A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
|
12db2ff006d70f4a7ab46168f2930505e6cae321.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
#define eps 1e-4
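// Note on the kernel below: __syncthreads() only synchronizes threads within
// a single block, so the stride loop is a complete tree reduction only when
// the whole array is handled by one block; with the multi-block launch used
// in main (32-thread blocks over N elements), reads across block boundaries
// at the smaller strides are not synchronized.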
__global__ void accumulate(float *da, float* ans_device, int N){
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx * blockDim.x + tx;
//printf("%d\n", idx);
for(int stride = N / 2; stride > 0; stride >>= 1){
if(idx < stride){
da[idx] = da[idx] + da[idx + stride];
}
__syncthreads();
}
if(idx == 0){
ans_device[0] = da[idx];
//printf("ans 0: %f\n", ans_device[0]);
}
}
float accumulate_cpu(float *da, int size){
if(size == 1)
return da[0];
int newsize = size / 2;
int stride = newsize;
for(int i = 0; i < newsize; i++){
da[i] = da[i] + da[i + stride];
}
if(size % 2 == 1){
da[0] = da[0] + da[size - 1];
}
else{
;
}
return accumulate_cpu(da, newsize);
}
void check(float *ha, float *ans_host, int N){
float sum = 0;
//cout<<sum<<' '<<ans_host[0]<<endl;
for(int i = 0; i < N; i++){
sum += ha[i];
}
if(sum == ans_host[0]){
cout<<"Nice ! Equal !!!"<<endl;
}
else{
cout<<"Bad ! Not Equal !"<<endl;
}
}
int main(){
int N = 1<<8;
size_t size = N * sizeof(float);
float *ha = (float*)malloc(size);
float *ans_host = (float*)malloc(1*sizeof(float));
for(int i = 0; i < N; i++)
ha[i] = 1;
//float ans = accumulate_cpu(ha, N);
//cout<<ans<<endl;
float *da = NULL;
float *ans_device = NULL;
hipMalloc((void**)&da, size);
hipMalloc((void**)&ans_device, 1*sizeof(float));
hipMemcpy(da, ha, size, hipMemcpyHostToDevice);
//dim3 threadPerBlock(N);
//dim3 blockPerGrid(1);
dim3 threadPerBlock(32);
dim3 blockPerGrid((N + threadPerBlock.x - 1) / threadPerBlock.x);
hipLaunchKernelGGL(( accumulate), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, da, ans_device, N);
hipDeviceSynchronize();
hipMemcpy(ans_host, ans_device, 1*sizeof(float), hipMemcpyDeviceToHost);
check(ha, ans_host, N);
free(ans_host);
free(ha);
hipFree(ans_device);
hipFree(da);
return 0;
} | 12db2ff006d70f4a7ab46168f2930505e6cae321.cu | #include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
using namespace std;
#define eps 1e-4
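// Note on the kernel below: __syncthreads() only synchronizes threads within
// a single block, so the stride loop is a complete tree reduction only when
// the whole array is handled by one block; with the multi-block launch used
// in main (32-thread blocks over N elements), reads across block boundaries
// at the smaller strides are not synchronized.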
__global__ void accumulate(float *da, float* ans_device, int N){
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx * blockDim.x + tx;
//printf("%d\n", idx);
for(int stride = N / 2; stride > 0; stride >>= 1){
if(idx < stride){
da[idx] = da[idx] + da[idx + stride];
}
__syncthreads();
}
if(idx == 0){
ans_device[0] = da[idx];
//printf("ans 0: %f\n", ans_device[0]);
}
}
float accumulate_cpu(float *da, int size){
if(size == 1)
return da[0];
int newsize = size / 2;
int stride = newsize;
for(int i = 0; i < newsize; i++){
da[i] = da[i] + da[i + stride];
}
if(size % 2 == 1){
da[0] = da[0] + da[size - 1];
}
else{
;
}
return accumulate_cpu(da, newsize);
}
void check(float *ha, float *ans_host, int N){
float sum = 0;
//cout<<sum<<' '<<ans_host[0]<<endl;
for(int i = 0; i < N; i++){
sum += ha[i];
}
if(sum == ans_host[0]){
cout<<"Nice ! Equal !!!"<<endl;
}
else{
cout<<"Bad ! Not Equal !"<<endl;
}
}
int main(){
int N = 1<<8;
size_t size = N * sizeof(float);
float *ha = (float*)malloc(size);
float *ans_host = (float*)malloc(1*sizeof(float));
for(int i = 0; i < N; i++)
ha[i] = 1;
//float ans = accumulate_cpu(ha, N);
//cout<<ans<<endl;
float *da = NULL;
float *ans_device = NULL;
cudaMalloc((void**)&da, size);
cudaMalloc((void**)&ans_device, 1*sizeof(float));
cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
//dim3 threadPerBlock(N);
//dim3 blockPerGrid(1);
dim3 threadPerBlock(32);
dim3 blockPerGrid((N + threadPerBlock.x - 1) / threadPerBlock.x);
accumulate<<<blockPerGrid, threadPerBlock>>> (da, ans_device, N);
cudaDeviceSynchronize();
cudaMemcpy(ans_host, ans_device, 1*sizeof(float), cudaMemcpyDeviceToHost);
check(ha, ans_host, N);
free(ans_host);
free(ha);
cudaFree(ans_device);
cudaFree(da);
return 0;
} |
f33b490082fa98ad3edfc49d9236b2801fd05956.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <array/ConstantDataBuffer.h>
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <array/ShapeDescriptor.h>
#include <hip/hip_runtime.h>
#include <exceptions/cuda_exception.h>
#include <graph/Context.h>
#include <graph/Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <helpers/ConstantShapeHelper.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/RandomLauncher.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/specials_cuda.h>
#include "testlayers.h"
using namespace sd;
using namespace sd::graph;
class CudaBasicsTests1 : public testing::Test {
public:
};
//////////////////////////////////////////////////////////////////////////
static hipError_t allocateDeviceMem(LaunchContext &lc, std::vector<void *> &devicePtrs,
const std::vector<std::pair<void *, size_t>> &hostData) {
if (devicePtrs.size() != hostData.size())
throw std::invalid_argument("allocateDeviceMem: the two input std::vectors should have the same size!");
hipError_t cudaResult;
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
if (cudaResult != 0) return cudaResult;
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
if (cudaResult != 0) return cudaResult;
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
hipStream_t stream = *lc.getCudaStream();
for (int i = 0; i < devicePtrs.size(); ++i) {
cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second);
if (cudaResult != 0) return cudaResult;
hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, stream);
}
return cudaResult;
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, TestPairwise_1) {
// allocating host-side arrays
auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
auto z = NDArrayFactory::create<double>('c', {5}, {0, 0, 0, 0, 0});
auto exp = NDArrayFactory::create<double>('c', {5}, {2, 4, 6, 8, 10});
// making raw buffers
sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo()));
ASSERT_EQ(0, res);
sd::Pointer nativeStream = (sd::Pointer)malloc(sizeof(hipStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t));
hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream));
auto stream = reinterpret_cast<hipStream_t *>(&nativeStream);
x.dataBuffer()->allocatePrimary();
x.syncToHost();
hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice,
*stream);
res = hipStreamSynchronize(*stream);
ASSERT_EQ(0, res);
LaunchContext lc(stream, nullptr, nullptr);
NativeOpExecutioner::execPairwiseTransform(
&lc, pairwise::Add, nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<sd::LongType *>(devShapePtrX),
nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<sd::LongType *>(devShapePtrX), nullptr, z.shapeInfo(),
devBufferPtrZ, reinterpret_cast<sd::LongType *>(devShapePtrX), nullptr);
res = hipStreamSynchronize(*stream);
ASSERT_EQ(0, res);
z.dataBuffer()->allocatePrimary();
hipMemcpyAsync(z.buffer(), devBufferPtrZ, z.lengthOf() * x.sizeOfT(), hipMemcpyDeviceToHost, *stream);
res = hipStreamSynchronize(*stream);
ASSERT_EQ(0, res);
hipFree(devBufferPtrX);
hipFree(devBufferPtrZ);
hipFree(devShapePtrX);
// needed due to memcpy
z.tickWriteHost();
for (int e = 0; e < z.lengthOf(); e++) {
// sd_printf("step %i\n", e);
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduceScalar_1) {
NDArray x1('c', {2, 2}, {0, 1, 2, 3}, sd::DataType::INT32);
NDArray x2('c', {2, 2}, {0.5, 1.5, -4.5, 3.5}, sd::DataType::BFLOAT16);
NDArray x3('c', {2, 2}, {0, -1, 0, 1}, sd::DataType::BOOL);
NDArray scalar('c', {}, std::vector<double>{0}, sd::DataType::INT64);
NDArray exp1('c', {}, std::vector<double>{3}, sd::DataType::INT64);
NDArray exp2('c', {}, std::vector<double>{2}, sd::DataType::INT64);
NDArray exp3('c', {}, std::vector<double>{1}, sd::DataType::INT64);
void *dX1, *dX2, *dX3, *dZ;
sd::LongType *dX1ShapeInfo, *dX2ShapeInfo, *dX3ShapeInfo, *dZShapeInfo;
hipError_t cudaResult;
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ), scalar.lengthOf() * scalar.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2ShapeInfo), shape::shapeInfoByteLength(x2.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZShapeInfo), shape::shapeInfoByteLength(scalar.shapeInfo()));
ASSERT_EQ(0, cudaResult);
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
x1.syncToHost();
x2.syncToHost();
x3.syncToHost();
scalar.syncToHost();
hipMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), hipMemcpyHostToDevice,
stream);
hipMemcpyAsync(dX2ShapeInfo, x2.shapeInfo(), shape::shapeInfoByteLength(x2.shapeInfo()), hipMemcpyHostToDevice,
stream);
hipMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), hipMemcpyHostToDevice,
stream);
hipMemcpyAsync(dZShapeInfo, scalar.shapeInfo(), shape::shapeInfoByteLength(scalar.shapeInfo()),
hipMemcpyHostToDevice, stream);
void *reductionPointer = nullptr;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
cudaResult = hipMemset(reductionPointer, 0, 1024 * 1024);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, LaunchContext::defaultContext()->getReductionPointer(),
LaunchContext::defaultContext()->getScalarPointer(),
LaunchContext::defaultContext()->getAllocationPointer());
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, x1.buffer(), x1.shapeInfo(), dX1,
dX1ShapeInfo, nullptr, scalar.buffer(), scalar.shapeInfo(), dZ,
dZShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
scalar.tickWriteHost();
ASSERT_NEAR(exp1.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x2.shapeInfo(), dX2,
dX2ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar.e<float>(0), 1e-5);
// *************************************
NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x3.shapeInfo(), dX3,
dX3ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp3.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
hipFree(dX1);
hipFree(dX2);
hipFree(dX3);
hipFree(dZ);
hipFree(dX1ShapeInfo);
hipFree(dX2ShapeInfo);
hipFree(dX3ShapeInfo);
hipFree(dZShapeInfo);
/***************************************/
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3Scalar_1) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x1('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::INT32);
NDArray x2('c', {2, 2}, {-1, -2, -3, -4}, sd::DataType::INT32);
NDArray x3('c', {2, 2}, {1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray x4('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::DOUBLE);
NDArray exp1('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32);
NDArray exp2('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE);
NDArray scalar1('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32);
NDArray scalar2('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE);
void *dX1, *dX2, *dX3, *dX4, *dZ1, *dZ2;
sd::LongType *dX1ShapeInfo, *dX3ShapeInfo, *dZ1ShapeInfo, *dZ2ShapeInfo;
hipError_t cudaResult;
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX4), x4.lengthOf() * x4.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ1), scalar1.lengthOf() * scalar1.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ2), scalar2.lengthOf() * scalar2.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ1ShapeInfo), shape::shapeInfoByteLength(scalar1.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ2ShapeInfo), shape::shapeInfoByteLength(scalar2.shapeInfo()));
ASSERT_EQ(0, cudaResult);
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
x1.syncToHost();
x2.syncToHost();
x3.syncToHost();
x4.syncToHost();
scalar1.syncToHost();
scalar2.syncToHost();
hipMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX4, x4.buffer(), x4.lengthOf() * x4.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), hipMemcpyHostToDevice,
stream);
hipMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), hipMemcpyHostToDevice,
stream);
hipMemcpyAsync(dZ1ShapeInfo, scalar1.shapeInfo(), shape::shapeInfoByteLength(scalar1.shapeInfo()),
hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dZ2ShapeInfo, scalar2.shapeInfo(), shape::shapeInfoByteLength(scalar2.shapeInfo()),
hipMemcpyHostToDevice, stream);
/***************************************/
void *reductionPointer = nullptr;
int *allocationPointer = nullptr;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, reductionPointer, nullptr, allocationPointer);
/***************************************/
NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot, nullptr, x1.shapeInfo(), dX1, dX1ShapeInfo, nullptr,
nullptr, x2.shapeInfo(), dX2, dX1ShapeInfo, nullptr, scalar1.shapeInfo(), dZ1,
dZ1ShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
scalar1.tickWriteHost();
scalar2.tickWriteHost();
hipMemcpyAsync(scalar1.buffer(), dZ1, scalar1.lengthOf() * scalar1.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp1.e<float>(0), scalar1.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot, nullptr, x3.shapeInfo(), dX3, dX3ShapeInfo, nullptr,
nullptr, x4.shapeInfo(), dX4, dX3ShapeInfo, nullptr, scalar2.shapeInfo(), dZ2,
dZ2ShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar2.buffer(), dZ2, scalar2.lengthOf() * scalar2.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar2.e<float>(0), 1e-5);
/***************************************/
hipFree(dX1);
hipFree(dX2);
hipFree(dX3);
hipFree(dX4);
hipFree(dZ1);
hipFree(dZ2);
hipFree(dX1ShapeInfo);
hipFree(dX3ShapeInfo);
hipFree(dZ1ShapeInfo);
hipFree(dZ2ShapeInfo);
/***************************************/
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_1) {
NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::INT32);
NDArray y('c', {2, 2}, {-1, -2, -3, -4}, sd::DataType::INT32);
NDArray exp('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 1};
x.syncToHost();
y.syncToHost();
z.syncToHost();
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void *> devicePtrs(hostData.size(), nullptr);
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_2) {
NDArray x('c', {2, 2}, {1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray y('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {0, 1};
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_3) {
NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::INT32);
NDArray y('c', {2, 3}, {-6, -5, -4, -3, -2, -1}, sd::DataType::INT32);
NDArray exp('c', {3}, {-18, -20, -18}, sd::DataType::FLOAT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType)); // 4-- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_4) {
NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE);
NDArray y('c', {2, 3}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray exp('c', {2}, {9, 22.5}, sd::DataType::DOUBLE);
NDArray z('c', {2}, {100, 100}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType)); // 4-- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_5) {
NDArray x('c', {2, 2, 3}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::FLOAT32);
NDArray y('c', {2, 2, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 3}, {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType)); // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
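////////////////////////////////////////////////////////////////////////////
// Illustration only -- not part of the original suite and not referenced by any test:
// a minimal host-side sketch of what reduce3::Dot over dimension 1 computes for the
// row-major ('c') {2,2,3} inputs used above, assuming the usual offset formula
// idx = (i*2 + j)*3 + k. With x filled with 1.5 and y = 1..12 it reproduces
// exp = {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}.
static inline void referenceDotOverDim1_2x2x3(const float* x, const float* y, float* z) {
for (int i = 0; i < 2; ++i)
for (int k = 0; k < 3; ++k) {
float sum = 0.f;
for (int j = 0; j < 2; ++j) {
const int idx = (i * 2 + j) * 3 + k; // row-major offset for shape {2,2,3}
sum += x[idx] * y[idx];
}
z[i * 3 + k] = sum; // e.g. z[0*3 + 0] = 1.5*1 + 1.5*4 = 7.5
}
}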
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_1) {
NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::INT32);
NDArray y('c', {2, 3}, {-1, 1, -1, 1, -1, 1}, sd::DataType::INT32);
NDArray exp('c', {2, 3}, {2, -2, 2, 2, -2, 2}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0};
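// sanity check: reduce3All computes the dot of every x TAD with every y TAD along
// dimension 0, i.e. z[i][j] = dot(xCol_i, yCol_j); e.g. dot({1,3}, {-1,1}) = 2 and
// dot({1,3}, {1,-1}) = -2, giving the first row {2, -2, 2}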
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType)); // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_2) {
NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::DOUBLE);
NDArray y('c', {2, 3}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray exp('c', {2, 3}, {6, 6, 6, 9, 9, 9}, sd::DataType::DOUBLE);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {0};
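// sanity check: x columns along dimension 0 are {1,3} and {2,4}; dotted with the
// constant-1.5 columns of y they give 1.5*4 = 6 and 1.5*6 = 9, hence rows {6,6,6} and {9,9,9}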
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType)); // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_1) {
NDArray x('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
x.linspace(-2.);
x.syncToDevice();
NDArray exp('c', {2}, {2, 2}, sd::DataType::INT64);
NDArray z('c', {2}, {100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {1};
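// sanity check: linspace(-2.) fills the rows with {-2,-1,0} and {1,2,3}, so IndexMax
// along dimension 1 returns index 2 for both rows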
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream);
if (cudaResult != 0) throw sd::cuda_exception::build("execIndexReduce failed", cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_2) {
NDArray x('c', {2, 3, 4, 5},
{100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::FLOAT32);
x.linspace(-2.f);
x.syncToDevice();
NDArray exp('c', {2, 5}, {11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, sd::DataType::INT64);
NDArray z('c', {2, 5}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {1, 2};
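// sanity check: each TAD spans dimensions 1 and 2 (3*4 = 12 elements); x is strictly
// increasing, so the maximum always sits at the last in-TAD position, index 11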
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_3) {
NDArray x('c', {2, 3, 4, 5},
{100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::DOUBLE);
x.linspace(-2.);
x.syncToDevice();
NDArray exp('c', {3}, {39, 39, 39}, sd::DataType::INT64);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {0, 2, 3};
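// sanity check: each TAD spans dimensions 0, 2 and 3 (2*4*5 = 40 elements); x is strictly
// increasing, so the maximum always sits at the last in-TAD position, index 39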
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_1) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3}, {0, 1, 2, 3, 4, 5}, sd::DataType::INT64);
NDArray exp('c', {2, 3}, {0, 0, 1, 1, 2, 2}, sd::DataType::INT64);
NDArray scalar('c', {}, std::vector<double>{2.f}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
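// note: z is INT64, so dividing by the 2.f scalar yields truncated quotients:
// {0,1,2,3,4,5} / 2 -> {0,0,1,1,2,2}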
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_2) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3}, {-1, -2, -3, -4, -5, -6}, sd::DataType::INT64);
NDArray exp('c', {2, 3}, {10, 10, 10, 10, 10, 10}, sd::DataType::FLOAT32);
NDArray scalar('c', {}, std::vector<double>{10.f}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, sd::scalar::CopyPws, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_3) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, sd::DataType::INT64);
NDArray scalars('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 3, 2}, {0, 0, 2, 1, 4, 2, 2, 1, 2, 2, 3, 2}, sd::DataType::INT64);
NDArray z('c', {2, 3, 2}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {1};
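// sanity check (assuming TADs along dimension 1 are paired with the scalars in row-major
// order): there are 2*2 = 4 TADs of length 3, one scalar each; e.g. x[1][.][0] = {6,8,10}
// divided by the third scalar (3) gives {2,2,3} after truncation to INT64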
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(),
scalars.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_1) {
NDArray x('c', {2, 3}, {-1, -2, 0, 1, 2, 3}, sd::DataType::BFLOAT16);
NDArray scalar('c', {}, std::vector<double>{0}, sd::DataType::BFLOAT16);
NDArray exp('c', {2, 3}, {0, 0, 0, 1, 1, 1}, sd::DataType::BOOL);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(),
scalar.specialShapeInfo(), nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_2) {
NDArray x('c', {2, 3}, {0, 1, 2, 3, 4, 5}, sd::DataType::FLOAT32);
NDArray scalars('c', {2}, {-1, 4}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 3}, {1, 1, 1, 0, 0, 1}, sd::DataType::BOOL);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
std::vector<int> dimensions = {1};
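// sanity check: the TADs along dimension 1 are the two rows; {0,1,2} > -1 gives {1,1,1}
// and {3,4,5} > 4 gives {0,0,1}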
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(),
scalars.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_1) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray y('c', {3}, {10, 20, 30}, sd::DataType::INT64);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray exp('c', {2, 3, 4},
{10, 11, 12, 13, 24, 25, 26, 27, 38, 39, 40, 41, 22, 23, 24, 25, 36, 37, 38, 39, 50, 51, 52, 53},
sd::DataType::INT32);
x.linspace(0);
x.syncToDevice();
std::vector<int> dimensions = {1};
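// sanity check: y is added along dimension 1, z[i][j][k] = x[i][j][k] + y[j];
// e.g. x[0][1][.] = {4,5,6,7} + 20 = {24,25,26,27}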
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_2) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray y('c', {2, 4}, {10, 20, 30, 40, 50, 60, 70, 80}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::FLOAT32);
NDArray exp('c', {2, 3, 4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51.,
62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103},
sd::DataType::FLOAT32);
x.linspace(0);
x.syncToDevice();
std::vector<int> dimensions = {0, 2};
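// sanity check (assuming TADs span dimensions {0,2}): the whole of y is added to each TAD,
// z[i][j][k] = x[i][j][k] + y[i][k]; e.g. x[1][0][.] = {12,13,14,15} + {50,60,70,80} = {62,73,84,95}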
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_1) {
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray y('c', {3}, {2, 12, 22}, sd::DataType::INT32);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::BOOL);
NDArray exp('c', {2, 3, 4}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
sd::DataType::BOOL);
x.linspace(1);
x.syncToDevice();
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(
&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_2) {
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::FLOAT32);
NDArray y('c', {2, 4}, {1, 10, 10, 15, 20, 20, 20, 24}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::BOOL);
NDArray exp('c', {2, 3, 4}, {1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
sd::DataType::BOOL);
x.linspace(1);
x.syncToDevice();
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare host-side input arrays for allocateDeviceMem
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(
&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseTransform_1) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 2, 2}, {1, 5, 3, 7, 2, 6, 4, 8}, sd::DataType::INT32);
NDArray y('c', {4, 2}, {0.1, 0.2, 0.3, 0.4, 1.5, 0.6, 0.7, 1.8}, sd::DataType::DOUBLE);
NDArray z('c', {8}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {8}, {0, 1, 2, 3, 3, 5, 6, 6}, sd::DataType::INT32);
x.permutei({2, 1, 0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
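// sanity check: with x reading {1,...,8}, Subtract produces {0.9,1.8,2.7,3.6,3.5,5.4,6.3,6.2},
// which the INT32 output truncates to {0,1,2,3,3,5,6,6}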
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseTransform(&lc, sd::pairwise::Subtract, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseBoolTransform_1) {
NDArray x('c', {2, 2, 2}, {1, 5, 3, 7, 2, 6, 4, 8}, sd::DataType::INT64);
NDArray y('c', {4, 2}, {0, 2, 0, 4, 0, 6, 0, 8}, sd::DataType::INT64);
NDArray z('c', {8}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {8}, {0, 1, 0, 1, 0, 1, 0, 1}, sd::DataType::BOOL);
x.permutei({2, 1, 0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseBoolTransform(&lc, sd::pairwise::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_1) {
NDArray x('c', {2, 2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE);
NDArray z('c', {4}, {100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {4}, {0, 1.5, 2.5, 3.5}, sd::DataType::FLOAT32);
x.permutei({1, 0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_2) {
NDArray x('c', {1, 4}, {0, 4, 9, 16}, sd::DataType::INT64);
NDArray z('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {2, 2}, {0, 2, 3, 4}, sd::DataType::DOUBLE);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_1) {
NDArray x('c', {2, 2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE);
NDArray z('c', {4, 1}, {100, 100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {4, 1}, {0, 2, 6, 12}, sd::DataType::INT32);
x.permutei({1, 0});
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_2) {
NDArray x('c', {1, 4}, {0, 6.25, 2.25, 12.25}, sd::DataType::BFLOAT16);
NDArray z('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 2}, {0, 6.25, 2.25, 12.25}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_1) {
NDArray x('c', {2, 3}, {0, 2, 4, 1, 3, 5}, sd::DataType::DOUBLE);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {3, 2}, {0, 3, 12, 27, 48, 75}, sd::DataType::DOUBLE);
x.permutei({1, 0});
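// sanity check: after the permute x's elements are visited as {0,1,2,3,4,5};
// CubeDerivative is 3*x^2, hence exp = {0,3,12,27,48,75}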
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_2) {
NDArray x('c', {6}, {0, 1, 2, 3, 4, 5}, sd::DataType::FLOAT32);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {3, 2}, {0, 3, 12, 27, 48, 75}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_1) {
NDArray x('c', {2, 3}, {0, 2.5, 4.5, 1.5, 3.5, 5.5}, sd::DataType::DOUBLE);
NDArray z('c', {1, 6}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {1, 6}, {0, 2.25, 6.25, 12.25, 20.25, 30.25}, sd::DataType::DOUBLE);
x.permutei({1, 0});
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_2) {
NDArray x('c', {6}, {0, 1, 2, 3, 4, 5}, sd::DataType::INT32);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {3, 2}, {0, 1, 4, 9, 16, 25}, sd::DataType::INT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_1) {
NDArray x('c', {2, 3}, {0, 2, 4, -1, -3, -5}, sd::DataType::DOUBLE);
NDArray z('c', {1, 6}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {1, 6}, {0, 0, 1, 0, 1, 0}, sd::DataType::BOOL);
x.permutei({1, 0});
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_2) {
NDArray x('c', {6}, {0, -1, 2, -3, 4, -5}, sd::DataType::INT32);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {3, 2}, {0, 0, 1, 0, 1, 0}, sd::DataType::BOOL);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_1) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {3}, {2.5, 6.5, 10.5}, sd::DataType::FLOAT32);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
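// sanity check: reducing dimensions {0,2} of the permuted array keeps the original middle
// dimension (size 3); e.g. the first mean is (-5-4-3-2 + 7+8+9+10) / 8 = 20/8 = 2.5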
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {2, 4}, {-1., 0., 1., 2., 11., 12., 13., 14.}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_1) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {3}, {20, 52, 84}, sd::DataType::INT32);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::FLOAT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 4}, {-3., 0., 3., 6., 33., 36., 39., 42.}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_1) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {3}, {0, 1, 1}, sd::DataType::BOOL);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_2) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::FLOAT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {2, 4}, {1, 1, 1, 1, 0, 0, 0, 0}, sd::DataType::BOOL);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_1) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::INT64);
NDArray exp('c', {3}, {5, 6, 6}, sd::DataType::INT64);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_2) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::FLOAT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
NDArray exp('c', {2, 4}, {3, 1, 3, 2, 2, 1, 2, 3}, sd::DataType::INT64);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_1) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::FLOAT32);
x.permutei({2, 1, 0});
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
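// note: the 1 MB scratch buffers registered above give the scalar-reduction kernel device memory for intermediate (per-block) results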
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::DOUBLE);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_1) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT32);
NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::INT32);
x.permutei({2, 1, 0});
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::DOUBLE);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_1) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL);
NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL);
x.permutei({2, 1, 0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_2) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL);
NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_1) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64);
x.permutei({2, 1, 0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_2) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_1) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::FLOAT32);
NDArray y('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
NDArray exp('c', {3}, {10, 20, 30}, sd::DataType::DOUBLE);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {0, 1};
auto packX = ConstantTadHelper::getInstance().tadForDimensions(x.shapeInfo(), dimensions);
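// ConstantTadHelper returns a cached TAD pack whose shapeInfo/offsets already live in device-accessible memory,
// so this test needs no manual hipMalloc/memcpy of TAD data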
LaunchContext *context = x.getContext();
x.syncToDevice();
y.syncToDevice();
PointersManager pm(context, "execReduce3TAD_1");
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(context, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, dimensions.size(), packX.specialShapeInfo(),
packX.specialOffsets(), nullptr, nullptr);
pm.synchronize();
// cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// z.printIndexedBuffer("OutputReduce3TAD");
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_2) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray y('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray exp('c', {2}, {10, 73}, sd::DataType::FLOAT32);
NDArray z('c', {2}, {100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
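// TAD = tensor along dimension: tadOnlyShapeInfo/tadOffsets describe the sub-arrays of x spanned by dimensions {0, 2}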
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_3) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray y('c', {3}, {1, 2, 3}, sd::DataType::INT64);
NDArray exp('c', {2, 2}, {-22, -4, 14, 32}, sd::DataType::FLOAT32);
NDArray z('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_4) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE);
NDArray y('c', {2, 2, 3}, {10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{1820}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 1, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_1) {
// FIXME: Yurii, this test should be fixed
if (1 > 0) return;  // skip: test disabled until the FIXME above is resolved
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), true);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_2) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -20, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE);
NDArray exp('c', {2}, {3.405877, 9.715966}, sd::DataType::FLOAT32);
NDArray z('c', {2}, {100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(
&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], true);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
/*
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_3) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE);
NDArray exp('c', {2}, {10.606602, 2.121320}, sd::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));  // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType));                      // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation,
nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
(int*)devicePtrs[0], dimensions.size(),
(sd::LongType*)devicePtrs[1], (sd::LongType*)devicePtrs[2],
true);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
*/
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStatsScalar_1) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStatsScalar(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), true);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_1) {
// NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,0}, sd::DataType::DOUBLE);
NDArray z('c', {10}, {100, 0, 0, 0, 0, 0, 0, 0, 0, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {10},
{0.050942, -0.183229, -0.093921, 0.075469, 0.257166, -0.254838, 0.342227, -0.682188, -0.004345, 0.464633},
sd::DataType::FLOAT32);
sd::graph::RandomGenerator gen(119, 5);
hipError_t cudaResult;
NDArray *array = &z;
ExtraArguments arguments({0.f, 0.5f});
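// {mean, stddev} parameters consumed by GaussianDistribution below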
auto context = z.getContext();
PointersManager pm(context, "tests::execRandom_1");
// z.printIndexedBuffer("Input data");
// z.syncToDevice();
NativeOpExecutioner::execRandom(context, random::GaussianDistribution, &gen, array->buffer(), array->shapeInfo(),
array->specialBuffer(), array->specialShapeInfo(), array->buffer(),
array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(),
array->buffer(), array->shapeInfo(), array->specialBuffer(),
array->specialShapeInfo(), arguments.argumentsAsT(array->dataType()));
pm.synchronize();
z.tickWriteDevice();
// z.printIndexedBuffer("Output Gaussian");
// RandomLauncher::fillGaussian(context, gen, &z, 0.f, 0.5f);
// pm.synchronize();
// z.tickWriteDevice();
// z.printIndexedBuffer("Output Gaussian");
// hipStream_t stream;
// cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
// LaunchContext lc(&stream);
//
// // ::execRandom(extraPointers, random::GaussianDistribution, &gen, z.buffer(), z.shapeInfo(),
// z.specialBuffer(), z.special(), &extra);
// // call cuda kernel which calculates result
// NativeOpExecutioner::execRandom(&lc, sd::random::GaussianDistribution,
// &gen,
// nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
// nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
// nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
// extraArguments.argumentsAsT(z.dataType()));
//
// cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
// ASSERT_EQ(cudaResult, 0);
// z.tickWriteDevice();
// z.syncToHost();
// z.printIndexedBuffer("Random1");
ASSERT_EQ(exp, z);
// // verify results
// for (int e = 0; e < z.lengthOf(); e++)
// ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// hipFree(dExtraArgs);
// free allocated global device memory
// hipFree(dGen);
// delete cuda stream
// cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_2) {
NDArray x('c', {10}, {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, sd::DataType::DOUBLE);
NDArray z('c', {2, 5}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {10}, {0., 0., 0.3, 0., 0.5, 0., 0.7, 0., 0., 1.}, sd::DataType::DOUBLE);
ExtraArguments extraArguments({0.7});
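// single probability parameter (0.7) consumed by the DropOut op below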
sd::graph::RandomGenerator gen(119, 5);
// // prepare input arrays for prepareDataForCuda function
// std::vector<std::pair<void*,size_t>> hostData;
// hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions
// std::vector<void*> devicePtrs(hostData.size(), nullptr);
//
// create cuda stream and LaunchContext
hipError_t cudaResult;
// hipStream_t stream;
// cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext *lc = x.getContext(); //(&stream);
// allocate required amount of global device memory and copy host data to it
// cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(lc, sd::random::DropOut, &gen, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
extraArguments.argumentsAsT(z.dataType()));
cudaResult = hipStreamSynchronize(*lc->getCudaStream());
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
// for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
// cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_3) {
NDArray z('c', {10}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {10},
{2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537},
sd::DataType::DOUBLE);
std::vector<double> extraArguments = {1.5, 2.5};
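// {from, to} bounds consumed by UniformDistribution; expected values fall inside [1.5, 2.5]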
sd::graph::RandomGenerator gen(119, 5);
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double));  // 0 -- extraArguments
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), devicePtrs[0]);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_4) {
NDArray z('c', {2, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, sd::DataType::FLOAT32);
NDArray exp('c', {10},
{2.373649, 2.281399, 2.239791, 1.828228, 1.887353, 2.228222, 2.488636, 2.490847, 2.068904, 1.669537},
sd::DataType::FLOAT32);
z.permutei({1, 0});
ExtraArguments extraArguments({1.5, 2.5});
sd::graph::RandomGenerator gen(119, 5);
// // prepare input arrays for prepareDataForCuda function
// std::vector<std::pair<void*,size_t>> hostData;
// hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions
// std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
// hipError_t cudaResult;
// hipStream_t stream;
// cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
// LaunchContext lc(&stream);
//
// // allocate required amount of global device memory and copy host data to it
// cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
auto context = z.getContext();
PointersManager pm(context, "execRandom4");
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(context, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), extraArguments.argumentsAsT(z.dataType()));
// cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// z.printIndexedBuffer("Output Uniform4");
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
// for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
// cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
| f33b490082fa98ad3edfc49d9236b2801fd05956.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <array/ConstantDataBuffer.h>
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <array/ShapeDescriptor.h>
#include <cuda.h>
#include <exceptions/cuda_exception.h>
#include <graph/Context.h>
#include <graph/Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <helpers/ConstantShapeHelper.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/RandomLauncher.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/specials_cuda.h>
#include "testlayers.h"
using namespace sd;
using namespace sd::graph;
class CudaBasicsTests1 : public testing::Test {
public:
};
//////////////////////////////////////////////////////////////////////////
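// test helper: registers 1 MB reduction/allocation scratch buffers on the LaunchContext, then allocates one device
// buffer per (hostPtr, byteCount) pair in hostData and async-copies the host data to it on the context's stream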
static cudaError_t allocateDeviceMem(LaunchContext &lc, std::vector<void *> &devicePtrs,
const std::vector<std::pair<void *, size_t>> &hostData) {
if (devicePtrs.size() != hostData.size())
throw std::invalid_argument("prepareDataForCuda: the two input std::vectors should have the same size!");
cudaError_t cudaResult;
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
if (cudaResult != 0) return cudaResult;
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
if (cudaResult != 0) return cudaResult;
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
cudaStream_t stream = *lc.getCudaStream();
for (int i = 0; i < devicePtrs.size(); ++i) {
cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second);
if (cudaResult != 0) return cudaResult;
cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream);
}
return cudaResult;
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, TestPairwise_1) {
// allocating host-side arrays
auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
auto z = NDArrayFactory::create<double>('c', {5}, {0, 0, 0, 0, 0});
auto exp = NDArrayFactory::create<double>('c', {5}, {2, 4, 6, 8, 10});
// making raw buffers
sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo()));
ASSERT_EQ(0, res);
sd::Pointer nativeStream = (sd::Pointer)malloc(sizeof(cudaStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t));
cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream));
auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream);
x.dataBuffer()->allocatePrimary();
x.syncToHost();
cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice,
*stream);
res = cudaStreamSynchronize(*stream);
ASSERT_EQ(0, res);
LaunchContext lc(stream, nullptr, nullptr);
NativeOpExecutioner::execPairwiseTransform(
&lc, pairwise::Add, nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<sd::LongType *>(devShapePtrX),
nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<sd::LongType *>(devShapePtrX), nullptr, z.shapeInfo(),
devBufferPtrZ, reinterpret_cast<sd::LongType *>(devShapePtrX), nullptr);
res = cudaStreamSynchronize(*stream);
ASSERT_EQ(0, res);
z.dataBuffer()->allocatePrimary();
cudaMemcpyAsync(z.buffer(), devBufferPtrZ, z.lengthOf() * x.sizeOfT(), cudaMemcpyDeviceToHost, *stream);
res = cudaStreamSynchronize(*stream);
ASSERT_EQ(0, res);
cudaFree(devBufferPtrX);
cudaFree(devBufferPtrZ);
cudaFree(devShapePtrX);
// needed due to memcpy
z.tickWriteHost();
for (int e = 0; e < z.lengthOf(); e++) {
// sd_printf("step %i\n", e);
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduceScalar_1) {
NDArray x1('c', {2, 2}, {0, 1, 2, 3}, sd::DataType::INT32);
NDArray x2('c', {2, 2}, {0.5, 1.5, -4.5, 3.5}, sd::DataType::BFLOAT16);
NDArray x3('c', {2, 2}, {0, -1, 0, 1}, sd::DataType::BOOL);
NDArray scalar('c', {}, std::vector<double>{0}, sd::DataType::INT64);
NDArray exp1('c', {}, std::vector<double>{3}, sd::DataType::INT64);
NDArray exp2('c', {}, std::vector<double>{2}, sd::DataType::INT64);
NDArray exp3('c', {}, std::vector<double>{1}, sd::DataType::INT64);
void *dX1, *dX2, *dX3, *dZ;
sd::LongType *dX1ShapeInfo, *dX2ShapeInfo, *dX3ShapeInfo, *dZShapeInfo;
cudaError_t cudaResult;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ), scalar.lengthOf() * scalar.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2ShapeInfo), shape::shapeInfoByteLength(x2.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZShapeInfo), shape::shapeInfoByteLength(scalar.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
x1.syncToHost();
x2.syncToHost();
x3.syncToHost();
scalar.syncToHost();
cudaMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(dX2ShapeInfo, x2.shapeInfo(), shape::shapeInfoByteLength(x2.shapeInfo()), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(dZShapeInfo, scalar.shapeInfo(), shape::shapeInfoByteLength(scalar.shapeInfo()),
cudaMemcpyHostToDevice, stream);
void *reductionPointer = nullptr;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMemset(reductionPointer, 0, 1024 * 1024);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, LaunchContext::defaultContext()->getReductionPointer(),
LaunchContext::defaultContext()->getScalarPointer(),
LaunchContext::defaultContext()->getAllocationPointer());
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, x1.buffer(), x1.shapeInfo(), dX1,
dX1ShapeInfo, nullptr, scalar.buffer(), scalar.shapeInfo(), dZ,
dZShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
scalar.tickWriteHost();
ASSERT_NEAR(exp1.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x2.shapeInfo(), dX2,
dX2ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar.e<float>(0), 1e-5);
// *************************************
NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x3.shapeInfo(), dX3,
dX3ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp3.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
cudaFree(dX1);
cudaFree(dX2);
cudaFree(dX3);
cudaFree(dZ);
cudaFree(dX1ShapeInfo);
cudaFree(dX2ShapeInfo);
cudaFree(dX3ShapeInfo);
cudaFree(dZShapeInfo);
/***************************************/
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3Scalar_1) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x1('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::INT32);
NDArray x2('c', {2, 2}, {-1, -2, -3, -4}, sd::DataType::INT32);
NDArray x3('c', {2, 2}, {1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray x4('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::DOUBLE);
NDArray exp1('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32);
NDArray exp2('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE);
NDArray scalar1('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32);
NDArray scalar2('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE);
void *dX1, *dX2, *dX3, *dX4, *dZ1, *dZ2;
sd::LongType *dX1ShapeInfo, *dX3ShapeInfo, *dZ1ShapeInfo, *dZ2ShapeInfo;
cudaError_t cudaResult;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX4), x4.lengthOf() * x4.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ1), scalar1.lengthOf() * scalar1.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ2), scalar2.lengthOf() * scalar2.sizeOfT());
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ1ShapeInfo), shape::shapeInfoByteLength(scalar1.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ2ShapeInfo), shape::shapeInfoByteLength(scalar2.shapeInfo()));
ASSERT_EQ(0, cudaResult);
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
x1.syncToHost();
x2.syncToHost();
x3.syncToHost();
x4.syncToHost();
scalar1.syncToHost();
scalar2.syncToHost();
cudaMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX4, x4.buffer(), x4.lengthOf() * x4.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(dZ1ShapeInfo, scalar1.shapeInfo(), shape::shapeInfoByteLength(scalar1.shapeInfo()),
cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dZ2ShapeInfo, scalar2.shapeInfo(), shape::shapeInfoByteLength(scalar2.shapeInfo()),
cudaMemcpyHostToDevice, stream);
/***************************************/
void *reductionPointer = nullptr;
int *allocationPointer = nullptr;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, reductionPointer, nullptr, allocationPointer);
/***************************************/
NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot, nullptr, x1.shapeInfo(), dX1, dX1ShapeInfo, nullptr,
nullptr, x2.shapeInfo(), dX2, dX1ShapeInfo, nullptr, scalar1.shapeInfo(), dZ1,
dZ1ShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
scalar1.tickWriteHost();
scalar2.tickWriteHost();
cudaMemcpyAsync(scalar1.buffer(), dZ1, scalar1.lengthOf() * scalar1.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp1.e<float>(0), scalar1.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot, nullptr, x3.shapeInfo(), dX3, dX3ShapeInfo, nullptr,
nullptr, x4.shapeInfo(), dX4, dX3ShapeInfo, nullptr, scalar2.shapeInfo(), dZ2,
dZ2ShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar2.buffer(), dZ2, scalar2.lengthOf() * scalar2.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar2.e<float>(0), 1e-5);
/***************************************/
cudaFree(dX1);
cudaFree(dX2);
cudaFree(dX3);
cudaFree(dX4);
cudaFree(dZ1);
cudaFree(dZ2);
cudaFree(dX1ShapeInfo);
cudaFree(dX3ShapeInfo);
cudaFree(dZ1ShapeInfo);
cudaFree(dZ2ShapeInfo);
/***************************************/
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_1) {
NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::INT32);
NDArray y('c', {2, 2}, {-1, -2, -3, -4}, sd::DataType::INT32);
NDArray exp('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 1};
x.syncToHost();
y.syncToHost();
z.syncToHost();
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void *> devicePtrs(hostData.size(), nullptr);
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_2) {
NDArray x('c', {2, 2}, {1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray y('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {0, 1};
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_3) {
NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::INT32);
NDArray y('c', {2, 3}, {-6, -5, -4, -3, -2, -1}, sd::DataType::INT32);
NDArray exp('c', {3}, {-18, -20, -18}, sd::DataType::FLOAT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType));  // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_4) {
NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE);
NDArray y('c', {2, 3}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray exp('c', {2}, {9, 22.5}, sd::DataType::DOUBLE);
NDArray z('c', {2}, {100, 100}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType));  // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_5) {
NDArray x('c', {2, 2, 3}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::FLOAT32);
NDArray y('c', {2, 2, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 3}, {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType));  // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_1) {
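// execReduce3All computes Dot between every TAD of x along dimension 0 (its columns) and
// every TAD of y, producing a 2x3 matrix of all pairwise dot products.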
NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::INT32);
NDArray y('c', {2, 3}, {-1, 1, -1, 1, -1, 1}, sd::DataType::INT32);
NDArray exp('c', {2, 3}, {2, -2, 2, 2, -2, 2}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType)); // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_2) {
NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::DOUBLE);
NDArray y('c', {2, 3}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE);
NDArray exp('c', {2, 3}, {6, 6, 6, 9, 9, 9}, sd::DataType::DOUBLE);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo)); // 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(sd::LongType));  // 4 -- yTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[3], (sd::LongType *)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_1) {
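// IndexMax over dimension 1 returns, for each row of x (linspace starting at -2),
// the index of its largest element -- here the last column, index 2.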
NDArray x('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
x.linspace(-2.);
x.syncToDevice();
NDArray exp('c', {2}, {2, 2}, sd::DataType::INT64);
NDArray z('c', {2}, {100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream);
if (cudaResult != 0) throw sd::cuda_exception::build("execIndexReduce failed", cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_2) {
NDArray x('c', {2, 3, 4, 5},
{100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::FLOAT32);
x.linspace(-2.f);
x.syncToDevice();
NDArray exp('c', {2, 5}, {11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, sd::DataType::INT64);
NDArray z('c', {2, 5}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {1, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_3) {
NDArray x('c', {2, 3, 4, 5},
{100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::DOUBLE);
x.linspace(-2.);
x.syncToDevice();
NDArray exp('c', {3}, {39, 39, 39}, sd::DataType::INT64);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {0, 2, 3};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_1) {
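// Element-wise division of an INT64 array by a FLOAT32 scalar; the quotient is truncated
// to the integer output type, so {0,...,5} / 2 -> {0, 0, 1, 1, 2, 2}.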
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3}, {0, 1, 2, 3, 4, 5}, sd::DataType::INT64);
NDArray exp('c', {2, 3}, {0, 0, 1, 1, 2, 2}, sd::DataType::INT64);
NDArray scalar('c', {}, std::vector<double>{2.f}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_2) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3}, {-1, -2, -3, -4, -5, -6}, sd::DataType::INT64);
NDArray exp('c', {2, 3}, {10, 10, 10, 10, 10, 10}, sd::DataType::FLOAT32);
NDArray scalar('c', {}, std::vector<double>{10.f}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, sd::scalar::CopyPws, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_3) {
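// TAD variant of execScalar: each tensor along dimension 1 of x is divided by its own
// entry from the `scalars` array (one scalar per TAD).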
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, sd::DataType::INT64);
NDArray scalars('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 3, 2}, {0, 0, 2, 1, 4, 2, 2, 1, 2, 2, 3, 2}, sd::DataType::INT64);
NDArray z('c', {2, 3, 2}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(),
scalars.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_1) {
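// Scalar comparison producing a BOOL array: z[i] = (x[i] > 0).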
NDArray x('c', {2, 3}, {-1, -2, 0, 1, 2, 3}, sd::DataType::BFLOAT16);
NDArray scalar('c', {}, std::vector<double>{0}, sd::DataType::BFLOAT16);
NDArray exp('c', {2, 3}, {0, 0, 0, 1, 1, 1}, sd::DataType::BOOL);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(),
scalar.specialShapeInfo(), nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_2) {
NDArray x('c', {2, 3}, {0, 1, 2, 3, 4, 5}, sd::DataType::FLOAT32);
NDArray scalars('c', {2}, {-1, 4}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 3}, {1, 1, 1, 0, 0, 1}, sd::DataType::BOOL);
NDArray z('c', {2, 3}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(),
scalars.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_1) {
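// Broadcast Add of the length-3 vector y along dimension 1 of the 2x3x4 array x.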
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray y('c', {3}, {10, 20, 30}, sd::DataType::INT64);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray exp('c', {2, 3, 4},
{10, 11, 12, 13, 24, 25, 26, 27, 38, 39, 40, 41, 22, 23, 24, 25, 36, 37, 38, 39, 50, 51, 52, 53},
sd::DataType::INT32);
x.linspace(0);
x.syncToDevice();
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_2) {
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray y('c', {2, 4}, {10, 20, 30, 40, 50, 60, 70, 80}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::FLOAT32);
NDArray exp('c', {2, 3, 4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51.,
62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103},
sd::DataType::FLOAT32);
x.linspace(0);
x.syncToDevice();
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_1) {
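// Broadcast EqualTo: marks the positions where an element of x equals the value of y
// broadcast along dimension 1.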
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::INT32);
NDArray y('c', {3}, {2, 12, 22}, sd::DataType::INT32);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
                           100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
          sd::DataType::BOOL);
NDArray exp('c', {2, 3, 4}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
sd::DataType::BOOL);
x.linspace(1);
x.syncToDevice();
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(
&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_2) {
NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::FLOAT32);
NDArray y('c', {2, 4}, {1, 10, 10, 15, 20, 20, 20, 24}, sd::DataType::FLOAT32);
NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100},
sd::DataType::BOOL);
NDArray exp('c', {2, 3, 4}, {1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
sd::DataType::BOOL);
x.linspace(1);
x.syncToDevice();
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(
&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseTransform_1) {
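// Pairwise Subtract on a permuted x (its view then reads 1..8 in order); the double-valued
// differences are truncated to the INT32 output type.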
if (!Environment::getInstance().isExperimentalBuild()) return;
NDArray x('c', {2, 2, 2}, {1, 5, 3, 7, 2, 6, 4, 8}, sd::DataType::INT32);
NDArray y('c', {4, 2}, {0.1, 0.2, 0.3, 0.4, 1.5, 0.6, 0.7, 1.8}, sd::DataType::DOUBLE);
NDArray z('c', {8}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {8}, {0, 1, 2, 3, 3, 5, 6, 6}, sd::DataType::INT32);
x.permutei({2, 1, 0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseTransform(&lc, sd::pairwise::Subtract, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseBoolTransform_1) {
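// Pairwise EqualTo producing BOOL: true wherever the permuted x and y hold the same value.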
NDArray x('c', {2, 2, 2}, {1, 5, 3, 7, 2, 6, 4, 8}, sd::DataType::INT64);
NDArray y('c', {4, 2}, {0, 2, 0, 4, 0, 6, 0, 8}, sd::DataType::INT64);
NDArray z('c', {8}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {8}, {0, 1, 0, 1, 0, 1, 0, 1}, sd::DataType::BOOL);
x.permutei({2, 1, 0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseBoolTransform(&lc, sd::pairwise::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_1) {
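// Element-wise Sqrt of a transposed DOUBLE input written into a FLOAT32 output.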
NDArray x('c', {2, 2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE);
NDArray z('c', {4}, {100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {4}, {0, 1.5, 2.5, 3.5}, sd::DataType::FLOAT32);
x.permutei({1, 0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_2) {
NDArray x('c', {1, 4}, {0, 4, 9, 16}, sd::DataType::INT64);
NDArray z('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {2, 2}, {0, 2, 3, 4}, sd::DataType::DOUBLE);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_1) {
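// Assign copies the (permuted) DOUBLE input into an INT32 output, truncating each value.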
NDArray x('c', {2, 2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE);
NDArray z('c', {4, 1}, {100, 100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {4, 1}, {0, 2, 6, 12}, sd::DataType::INT32);
x.permutei({1, 0});
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_2) {
NDArray x('c', {1, 4}, {0, 6.25, 2.25, 12.25}, sd::DataType::BFLOAT16);
NDArray z('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 2}, {0, 6.25, 2.25, 12.25}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_1) {
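// CubeDerivative computes 3*x^2 element-wise; x is permuted so its view reads 0..5.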
NDArray x('c', {2, 3}, {0, 2, 4, 1, 3, 5}, sd::DataType::DOUBLE);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {3, 2}, {0, 3, 12, 27, 48, 75}, sd::DataType::DOUBLE);
x.permutei({1, 0});
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_2) {
NDArray x('c', {6}, {0, 1, 2, 3, 4, 5}, sd::DataType::FLOAT32);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {3, 2}, {0, 3, 12, 27, 48, 75}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_1) {
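// Square computes x^2 element-wise, keeping the input data type.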
NDArray x('c', {2, 3}, {0, 2.5, 4.5, 1.5, 3.5, 5.5}, sd::DataType::DOUBLE);
NDArray z('c', {1, 6}, {100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {1, 6}, {0, 2.25, 6.25, 12.25, 20.25, 30.25}, sd::DataType::DOUBLE);
x.permutei({1, 0});
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_2) {
NDArray x('c', {6}, {0, 1, 2, 3, 4, 5}, sd::DataType::INT32);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {3, 2}, {0, 1, 4, 9, 16, 25}, sd::DataType::INT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_1) {
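// IsPositive maps each element to a BOOL flag (strictly greater than zero).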
NDArray x('c', {2, 3}, {0, 2, 4, -1, -3, -5}, sd::DataType::DOUBLE);
NDArray z('c', {1, 6}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {1, 6}, {0, 0, 1, 0, 1, 0}, sd::DataType::BOOL);
x.permutei({1, 0});
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_2) {
NDArray x('c', {6}, {0, -1, 2, -3, 4, -5}, sd::DataType::INT32);
NDArray z('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {3, 2}, {0, 0, 1, 0, 1, 0}, sd::DataType::BOOL);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_1) {
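// Mean over dimensions {0, 2} of the permuted array, leaving a length-3 FLOAT32 result.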
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {3}, {2.5, 6.5, 10.5}, sd::DataType::FLOAT32);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {2, 4}, {-1., 0., 1., 2., 11., 12., 13., 14.}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_1) {
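// Sum over dimensions {0, 2}; the result keeps the INT32 input type.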
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::INT32);
NDArray exp('c', {3}, {20, 52, 84}, sd::DataType::INT32);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::FLOAT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {2, 4}, {-3., 0., 3., 6., 33., 36., 39., 42.}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_1) {
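// Boolean IsPositive reduction over dimensions {0, 2}: a TAD maps to true if it contains
// at least one positive element (see the expected {0, 1, 1}).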
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {3}, {0, 1, 1}, sd::DataType::BOOL);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_2) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::FLOAT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::BOOL);
NDArray exp('c', {2, 4}, {1, 1, 1, 1, 0, 0, 0, 0}, sd::DataType::BOOL);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_1) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::INT32);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::INT64);
NDArray exp('c', {3}, {5, 6, 6}, sd::DataType::INT64);
x.permutei({2, 1, 0});
std::vector<int> dimensions = {0, 2};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_2) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::FLOAT32);
NDArray z('c', {2, 4}, {100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT64);
NDArray exp('c', {2, 4}, {3, 1, 3, 2, 2, 1, 2, 3}, sd::DataType::INT64);
std::vector<int> dimensions = {1};
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions);
NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), dims.data(), dims.size());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_1) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::FLOAT32);
x.permutei({2, 1, 0});
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
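// Reductions down to a scalar need device-side scratch: reductionPointer
// receives per-block partial results and allocationPointer is extra working
// memory for the kernel (our reading of the executioner's requirements --
// the exact layout is not documented here). The 1 MB allocations are simply
// generous sizes for a unit test, not a required amount.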
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::DOUBLE);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_1) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT32);
NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::INT32);
x.permutei({2, 1, 0});
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_2) {
NDArray x('c', {2, 3, 4}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::DOUBLE);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_1) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL);
NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL);
x.permutei({2, 1, 0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_2) {
NDArray x('c', {2, 3, 4},
{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18},
sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL);
NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_1) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::INT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64);
x.permutei({2, 1, 0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_2) {
NDArray x('c', {2, 3, 4}, {-5, 0, -3, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 0, 16, 0, 18},
sd::DataType::DOUBLE);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
int *allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_1) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::FLOAT32);
NDArray y('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
NDArray exp('c', {3}, {10, 20, 30}, sd::DataType::DOUBLE);
NDArray z('c', {3}, {100, 100, 100}, sd::DataType::DOUBLE);
std::vector<int> dimensions = {0, 1};
auto packX = ConstantTadHelper::getInstance().tadForDimensions(x.shapeInfo(), dimensions);
LaunchContext *context = x.getContext();
x.syncToDevice();
y.syncToDevice();
PointersManager pm(context, "execReduce3TAD_1");
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(context, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), nullptr, dimensions.size(), packX.specialShapeInfo(),
packX.specialOffsets(), nullptr, nullptr);
pm.synchronize();
// cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// z.printIndexedBuffer("OutputReduce3TAD");
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}
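// Note the two ways TAD (tensor-along-dimension) metadata is supplied in
// these reduce3 tests: this test asked ConstantTadHelper for a cached pack
// already resident on the device, while the next one builds shape::TAD by
// hand and copies the TAD shape info and offsets to the device itself
// through allocateDeviceMem.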
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_2) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray y('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray exp('c', {2}, {10, 73}, sd::DataType::FLOAT32);
NDArray z('c', {2}, {100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
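// allocateDeviceMem is a helper presumably defined earlier in this file;
// judging from its use, it cudaMalloc's one device buffer per (pointer, size)
// pair in hostData, copies the host bytes across on the context's stream and
// fills devicePtrs with the device addresses. Described from usage only.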
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(),
y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_3) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray y('c', {3}, {1, 2, 3}, sd::DataType::INT64);
NDArray exp('c', {2, 2}, {-22, -4, 14, 32}, sd::DataType::FLOAT32);
NDArray z('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_4) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE);
NDArray y('c', {2, 2, 3}, {10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120}, sd::DataType::DOUBLE);
NDArray exp('c', {}, std::vector<double>{1820}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 1, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(
&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr,
y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(), (sd::LongType *)devicePtrs[1],
(sd::LongType *)devicePtrs[2], (sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_1) {
// FIXME: Yurii, this test should be fixed
if (1 > 0) return;
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), true);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_2) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -20, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE);
NDArray exp('c', {2}, {3.405877, 9.715966}, sd::DataType::FLOAT32);
NDArray z('c', {2}, {100, 100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {0, 2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo,
shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(
&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int *)devicePtrs[0], dimensions.size(),
(sd::LongType *)devicePtrs[1], (sd::LongType *)devicePtrs[2], true);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
/*
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_3) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE);
NDArray exp('c', {2}, {10.606602, 2.121320}, sd::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation,
nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(sd::LongType*)devicePtrs[1], (sd::LongType*)devicePtrs[2],
true);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
*/
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStatsScalar_1) {
NDArray x('c', {2, 2, 3}, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6}, sd::DataType::INT64);
NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32);
NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void *reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024 * 1024);
ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStatsScalar(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(),
x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), true);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_1) {
// NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,0}, sd::DataType::DOUBLE);
NDArray z('c', {10}, {100, 0, 0, 0, 0, 0, 0, 0, 0, 100}, sd::DataType::FLOAT32);
NDArray exp('c', {10},
{0.050942, -0.183229, -0.093921, 0.075469, 0.257166, -0.254838, 0.342227, -0.682188, -0.004345, 0.464633},
sd::DataType::FLOAT32);
sd::graph::RandomGenerator gen(119, 5);
cudaError_t cudaResult;
NDArray *array = &z;
ExtraArguments arguments({0.f, 0.5f});
auto context = z.getContext();
PointersManager pm(context, "tests::execRandom_1");
// z.printIndexedBuffer("Input data");
// z.syncToDevice();
NativeOpExecutioner::execRandom(context, random::GaussianDistribution, &gen, array->buffer(), array->shapeInfo(),
array->specialBuffer(), array->specialShapeInfo(), array->buffer(),
array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(),
array->buffer(), array->shapeInfo(), array->specialBuffer(),
array->specialShapeInfo(), arguments.argumentsAsT(array->dataType()));
pm.synchronize();
z.tickWriteDevice();
// z.printIndexedBuffer("Output Gaussian");
// RandomLauncher::fillGaussian(context, gen, &z, 0.f, 0.5f);
// pm.synchronize();
// z.tickWriteDevice();
// z.printIndexedBuffer("Output Gaussian");
// cudaStream_t stream;
// cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
// LaunchContext lc(&stream);
//
// // ::execRandom(extraPointers, random::GaussianDistribution, &gen, z.buffer(), z.shapeInfo(),
// z.specialBuffer(), z.special(), &extra);
// // call cuda kernel which calculates result
// NativeOpExecutioner::execRandom(&lc, sd::random::GaussianDistribution,
// &gen,
// nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
// nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
// nullptr, z.shapeInfo(), z.specialBuffer(), z.special(),
// extraArguments.argumentsAsT(z.dataType()));
//
// cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
// ASSERT_EQ(cudaResult, 0);
// z.tickWriteDevice();
// z.syncToHost();
// z.printIndexedBuffer("Random1");
ASSERT_EQ(exp, z);
// // verify results
// for (int e = 0; e < z.lengthOf(); e++)
// ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// cudaFree(dExtraArgs);
// free allocated global device memory
// cudaFree(dGen);
// delete cuda stream
// cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_2) {
NDArray x('c', {10}, {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, sd::DataType::DOUBLE);
NDArray z('c', {2, 5}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {10}, {0., 0., 0.3, 0., 0.5, 0., 0.7, 0., 0., 1.}, sd::DataType::DOUBLE);
ExtraArguments extraArguments({0.7});
sd::graph::RandomGenerator gen(119, 5);
// // prepare input arrays for prepareDataForCuda function
// std::vector<std::pair<void*,size_t>> hostData;
// hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions
// std::vector<void*> devicePtrs(hostData.size(), nullptr);
//
// create cuda stream and LaunchContext
cudaError_t cudaResult;
// cudaStream_t stream;
// cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext *lc = x.getContext(); //(&stream);
// allocate required amount of global device memory and copy host data to it
// cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(lc, sd::random::DropOut, &gen, nullptr, x.shapeInfo(), x.specialBuffer(),
x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
extraArguments.argumentsAsT(z.dataType()));
cudaResult = cudaStreamSynchronize(*lc->getCudaStream());
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
// for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
// cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_3) {
NDArray z('c', {10}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE);
NDArray exp('c', {10},
{2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537},
sd::DataType::DOUBLE);
std::vector<double> extraArguments = {1.5, 2.5};
sd::graph::RandomGenerator gen(119, 5);
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void *, size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions
std::vector<void *> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData);
ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(), z.specialBuffer(),
z.specialShapeInfo(), devicePtrs[0]);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_4) {
NDArray z('c', {2, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, sd::DataType::FLOAT32);
NDArray exp('c', {10},
{2.373649, 2.281399, 2.239791, 1.828228, 1.887353, 2.228222, 2.488636, 2.490847, 2.068904, 1.669537},
sd::DataType::FLOAT32);
z.permutei({1, 0});
ExtraArguments extraArguments({1.5, 2.5});
sd::graph::RandomGenerator gen(119, 5);
// // prepare input arrays for prepareDataForCuda function
// std::vector<std::pair<void*,size_t>> hostData;
// hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions
// std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
// cudaError_t cudaResult;
// cudaStream_t stream;
// cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
// LaunchContext lc(&stream);
//
// // allocate required amount of global device memory and copy host data to it
// cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
auto context = z.getContext();
PointersManager pm(context, "execRandom4");
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(context, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(),
z.specialBuffer(), z.specialShapeInfo(), extraArguments.argumentsAsT(z.dataType()));
// cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.tickWriteDevice();
// z.printIndexedBuffer("Output Uniform4");
// verify results
for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
// for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
// cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
|
3c98d38e4799a22ba6747f405db613d23426c341.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2018 NVIDIA Corporation. All rights reserved.
#include "PxPhysics.h"
#include "PxVec4.h"
#include "PxVec3.h"
#include "PxVec2.h"
#include "PxMat33.h"
#include "PxStrideIterator.h"
namespace physx
{
template <typename T>
__device__ T* ptrOffset(T* p, PxU32 byteOffset)
{
return (T*)((unsigned char*)(p) + byteOffset);
}
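// gOffset is the running output cursor used by both kernels below to compact
// valid particles into the destination buffers. The loops index only with
// threadIdx.x/blockDim.x, so the kernels are evidently meant to be launched
// as a single block; the plain __device__ fallback for __CUDA_ARCH__ < 200
// is presumably there for very old parts where shared-memory atomics were
// not practical (an assumption -- the file does not say).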
#if __CUDA_ARCH__ < 200
__device__ PxU32 gOffset;
#else
__device__ __shared__ PxU32 gOffset;
#endif
// copies orientations and positions to the destination vertex
// buffer based on the validityBitmap state
extern "C" __global__ void updateInstancedVB(
PxVec3* destPositions,
PxVec3* destRotation0,
PxVec3* destRotation1,
PxVec3* destRotation2,
PxU32 destStride,
const PxVec4* srcPositions,
const PxMat33* srcRotations,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
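// Walk the set bits of this 32-bit validity word: `b &= b-1` clears the
// lowest set bit each pass and __ffs(b)-1 yields its position, i.e. the
// particle's index within this group of 32.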
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
const PxU32 index = srcBaseIndex | __ffs(b)-1;
const PxU32 offset = destIndex*destStride;
*ptrOffset(destRotation0, offset) = srcRotations[index].column0;
*ptrOffset(destRotation1, offset) = srcRotations[index].column1;
*ptrOffset(destRotation2, offset) = srcRotations[index].column2;
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
++destIndex;
}
}
}
}
// copies positions and alpha to the destination vertex buffer based on
// validity bitmap and particle life times
extern "C" __global__ void updateBillboardVB(
PxVec3* destPositions,
PxU8* destAlphas,
PxU32 destStride,
PxF32 fadingPeriod,
const PxVec4* srcPositions,
const PxReal* srcLifetimes,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
PxU32 index = srcBaseIndex | __ffs(b)-1;
const PxU32 offset = destIndex*destStride;
// copy position
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
// update alpha
if (srcLifetimes)
{
PxU8 lifetime = 0;
if(srcLifetimes[index] >= fadingPeriod)
lifetime = 255;
else
{
if(srcLifetimes[index] <= 0.0f)
lifetime = 0;
else
lifetime = static_cast<PxU8>(srcLifetimes[index] * 255 / fadingPeriod);
}
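// destAlphas is indexed with a *4 stride, which looks like the alpha byte of
// a packed 4-byte (RGBA8) per-vertex colour -- inferred from the indexing,
// not stated in this file.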
destAlphas[destIndex*4] = lifetime;
}
++destIndex;
}
}
}
}
} | 3c98d38e4799a22ba6747f405db613d23426c341.cu | //
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2018 NVIDIA Corporation. All rights reserved.
#include "PxPhysics.h"
#include "PxVec4.h"
#include "PxVec3.h"
#include "PxVec2.h"
#include "PxMat33.h"
#include "PxStrideIterator.h"
namespace physx
{
template <typename T>
__device__ T* ptrOffset(T* p, PxU32 byteOffset)
{
return (T*)((unsigned char*)(p) + byteOffset);
}
#if __CUDA_ARCH__ < 200
__device__ PxU32 gOffset;
#else
__device__ __shared__ PxU32 gOffset;
#endif
// copies orientations and positions to the destination vertex
// buffer based on the validityBitmap state
extern "C" __global__ void updateInstancedVB(
PxVec3* destPositions,
PxVec3* destRotation0,
PxVec3* destRotation1,
PxVec3* destRotation2,
PxU32 destStride,
const PxVec4* srcPositions,
const PxMat33* srcRotations,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
const PxU32 index = srcBaseIndex | __ffs(b)-1;
const PxU32 offset = destIndex*destStride;
*ptrOffset(destRotation0, offset) = srcRotations[index].column0;
*ptrOffset(destRotation1, offset) = srcRotations[index].column1;
*ptrOffset(destRotation2, offset) = srcRotations[index].column2;
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
++destIndex;
}
}
}
}
// copies positions and alpha to the destination vertex buffer based on
// validity bitmap and particle life times
extern "C" __global__ void updateBillboardVB(
PxVec3* destPositions,
PxU8* destAlphas,
PxU32 destStride,
PxF32 fadingPeriod,
const PxVec4* srcPositions,
const PxReal* srcLifetimes,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
PxU32 index = srcBaseIndex | __ffs(b)-1;
const PxU32 offset = destIndex*destStride;
// copy position
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
// update alpha
if (srcLifetimes)
{
PxU8 lifetime = 0;
if(srcLifetimes[index] >= fadingPeriod)
lifetime = 255;
else
{
if(srcLifetimes[index] <= 0.0f)
lifetime = 0;
else
lifetime = static_cast<PxU8>(srcLifetimes[index] * 255 / fadingPeriod);
}
destAlphas[destIndex*4] = lifetime;
}
++destIndex;
}
}
}
}
} |
4c96cf7acb405f271563844ff79093b33b9f8d3a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <cstring>
#include <vector>
#include <cmath>
#include "test_opt_utils.cuh"
#include "graph_utils.cuh"
//#define ENABLE_LOG true
#define ENALBE_LOUVAIN true
#include "nvlouvain.cuh"
#include "gtest/gtest.h"
#include "high_res_clock.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
using T = double;
int main(int argc, char* argv[]){
if(argc < 2)
{
std::cout<< "Help : ./louvain_test matrix_market_file.mtx"<<std::endl;
return 1;
}
FILE* fin = std::fopen( argv[1] ,"r");
int m, k, nnz;
MM_typecode mc;
CUDA_CALL(hipSetDevice(0));
EXPECT_EQ((mm_properties<int>(fin, 1, &mc, &m, &k, &nnz)) ,0);
EXPECT_EQ(m,k);
thrust::host_vector<int> coo_ind_h(nnz);
thrust::host_vector<int> csr_ptr_h(m+1);
thrust::host_vector<int> csr_ind_h(nnz);
thrust::host_vector<T> csr_val_h(nnz);
EXPECT_EQ( (mm_to_coo<int,T>(fin, 1, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL)), 0);
EXPECT_EQ( (coo_to_csr<int,T> (m, k, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL, &csr_ptr_h[0], NULL, NULL, NULL)), 0);
EXPECT_EQ(fclose(fin),0);
thrust::device_vector<int> csr_ptr_d(csr_ptr_h);
thrust::device_vector<int> csr_ind_d(csr_ind_h);
thrust::device_vector<T> csr_val_d(csr_val_h);
thrust::device_vector<T> tmp_1(nnz);
thrust::fill(thrust::hip::par, tmp_1.begin(), tmp_1.end(), 1.0);
thrust::device_vector<T>::iterator max_ele = thrust::max_element(thrust::hip::par, csr_val_d.begin(), csr_val_d.end());
bool weighted = (*max_ele!=1.0);
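// Heuristic: if the largest value in csr_val_d is exactly 1.0 the matrix is
// assumed to hold unit weights only, so the graph is treated as unweighted.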
//std::cout<<(weighted?"Weighted ":"Not Weigthed ")<<" n_vertex: "<<m<<"\n";
HighResClock hr_clock;
double louvain_time;
if(ENALBE_LOUVAIN){
T final_modulartiy(0);
//bool record = true;
bool has_init_cluster = false;
thrust::device_vector<int> cluster_d(m, 0);
std::vector< std::vector<int> > best_cluster_vec;
int* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data());
int* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data());
T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data());
int* init_cluster_ptr = thrust::raw_pointer_cast(cluster_d.data());
int num_level;
hipProfilerStart();
hr_clock.start();
nvlouvain::louvain<int,T>(csr_ptr_ptr, csr_ind_ptr, csr_val_ptr,
m, nnz,
weighted, has_init_cluster,
init_cluster_ptr, final_modulartiy, best_cluster_vec, num_level);
hr_clock.stop(&louvain_time);
hipProfilerStop();
std::cout<<"Final modularity: "<<COLOR_MGT<<final_modulartiy<<COLOR_WHT<<" num_level: "<<num_level<<std::endl;
std::cout<<"louvain total runtime:"<<louvain_time/1000<<" ms\n";
//for (size_t i = 0; i < best_cluster_vec.size(); i++)
//{
// for(std::vector<int>::iterator it = best_cluster_vec[i].begin(); it != best_cluster_vec[i].end(); ++it)
// std::cout << *it <<' ';
// std::cout << std::endl;
//}
}
return 0;
}
| 4c96cf7acb405f271563844ff79093b33b9f8d3a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <cstring>
#include <vector>
#include <cmath>
#include "test_opt_utils.cuh"
#include "graph_utils.cuh"
//#define ENABLE_LOG true
#define ENALBE_LOUVAIN true
#include "nvlouvain.cuh"
#include "gtest/gtest.h"
#include "high_res_clock.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
using T = double;
int main(int argc, char* argv[]){
if(argc < 2)
{
std::cout<< "Help : ./louvain_test matrix_market_file.mtx"<<std::endl;
return 1;
}
FILE* fin = std::fopen( argv[1] ,"r");
int m, k, nnz;
MM_typecode mc;
CUDA_CALL(cudaSetDevice(0));
EXPECT_EQ((mm_properties<int>(fin, 1, &mc, &m, &k, &nnz)) ,0);
EXPECT_EQ(m,k);
thrust::host_vector<int> coo_ind_h(nnz);
thrust::host_vector<int> csr_ptr_h(m+1);
thrust::host_vector<int> csr_ind_h(nnz);
thrust::host_vector<T> csr_val_h(nnz);
EXPECT_EQ( (mm_to_coo<int,T>(fin, 1, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL)), 0);
EXPECT_EQ( (coo_to_csr<int,T> (m, k, nnz, &coo_ind_h[0], &csr_ind_h[0], &csr_val_h[0], NULL, &csr_ptr_h[0], NULL, NULL, NULL)), 0);
EXPECT_EQ(fclose(fin),0);
thrust::device_vector<int> csr_ptr_d(csr_ptr_h);
thrust::device_vector<int> csr_ind_d(csr_ind_h);
thrust::device_vector<T> csr_val_d(csr_val_h);
thrust::device_vector<T> tmp_1(nnz);
thrust::fill(thrust::cuda::par, tmp_1.begin(), tmp_1.end(), 1.0);
thrust::device_vector<T>::iterator max_ele = thrust::max_element(thrust::cuda::par, csr_val_d.begin(), csr_val_d.end());
bool weighted = (*max_ele!=1.0);
//std::cout<<(weighted?"Weighted ":"Not Weigthed ")<<" n_vertex: "<<m<<"\n";
HighResClock hr_clock;
double louvain_time;
if(ENALBE_LOUVAIN){
T final_modulartiy(0);
//bool record = true;
bool has_init_cluster = false;
thrust::device_vector<int> cluster_d(m, 0);
std::vector< std::vector<int> > best_cluster_vec;
int* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data());
int* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data());
T* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data());
int* init_cluster_ptr = thrust::raw_pointer_cast(cluster_d.data());
int num_level;
cudaProfilerStart();
hr_clock.start();
nvlouvain::louvain<int,T>(csr_ptr_ptr, csr_ind_ptr, csr_val_ptr,
m, nnz,
weighted, has_init_cluster,
init_cluster_ptr, final_modulartiy, best_cluster_vec, num_level);
hr_clock.stop(&louvain_time);
cudaProfilerStop();
std::cout<<"Final modularity: "<<COLOR_MGT<<final_modulartiy<<COLOR_WHT<<" num_level: "<<num_level<<std::endl;
std::cout<<"louvain total runtime:"<<louvain_time/1000<<" ms\n";
//for (size_t i = 0; i < best_cluster_vec.size(); i++)
//{
// for(std::vector<int>::iterator it = best_cluster_vec[i].begin(); it != best_cluster_vec[i].end(); ++it)
// std::cout << *it <<' ';
// std::cout << std::endl;
//}
}
return 0;
}
|
2e1ed4af5365eb862c0f58d859dab581190d58fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void neg_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = -dy[i];
}
} | 2e1ed4af5365eb862c0f58d859dab581190d58fa.cu | #include "includes.h"
__global__ void neg_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = -dy[i];
}
} |