hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M)
---|---|---|---|
165a686a4aa92cdadb53405ec929534d07870680.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T>
float ComputeScale(T in_size, T out_size, bool align_corners) {
if (align_corners) {
return (float)(in_size - T(1)) / (float)(out_size - T(1));
} else {
return (float)in_size / (float)out_size;
}
}
template <typename T>
__device__ float TransformCoordinate(
const T coord_resized,
const float scale,
const bool align_corners) {
if (align_corners) {
return coord_resized * scale;
} else {
float coord_original = (coord_resized + 0.5f) * scale - 0.5f;
return coord_original < 0.f ? 0.f : coord_original;
}
}
template <typename T>
__global__ void _ResizeLinear2dNCHW(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int w_out = yi % out_w;
const int h_out = (yi / out_w) % out_h;
const int c = (yi / out_w / out_h) % C;
const int n = yi / out_w / out_h / C;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
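// Gather the four nearest input pixels and blend them bilinearly.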
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = (n * C + c) * H;
const float tl = convert::To<float>(__ldg(x + (offset + ti) * W + li));
const float tr = convert::To<float>(__ldg(x + (offset + ti) * W + ri));
const float bl = convert::To<float>(__ldg(x + (offset + bi) * W + li));
const float br = convert::To<float>(__ldg(x + (offset + bi) * W + ri));
const float v = h - ti;
const float u = w - li;
const float t = tl + (tr - tl) * u;
const float b = bl + (br - bl) * u;
y[yi] = convert::To<T>(t + (b - t) * v);
}
}
template <typename T>
__global__ void _ResizeLinear2dNHWC(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int c = yi % C;
const int w_out = (yi / C) % out_w;
const int h_out = (yi / C / out_w) % out_h;
const int n = yi / C / out_w / out_h;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = n * H * W * C + c;
const float tl = convert::To<float>(__ldg(x + offset + (ti * W + li) * C));
const float tr = convert::To<float>(__ldg(x + offset + (ti * W + ri) * C));
const float bl = convert::To<float>(__ldg(x + offset + (bi * W + li) * C));
const float br = convert::To<float>(__ldg(x + offset + (bi * W + ri) * C));
const float v = h - ti;
const float u = w - li;
const float t = tl + (tr - tl) * u;
const float b = bl + (br - bl) * u;
y[yi] = convert::To<T>(t + (b - t) * v);
}
}
template <typename T>
__global__ void _ResizeLinear2dGradNCHW(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* dy,
float* dx) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int w_out = yi % out_w;
const int h_out = (yi / out_w) % out_h;
const int c = (yi / out_w / out_h) % C;
const int n = yi / out_w / out_h / C;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = (n * C + c) * H;
const float v = h - ti;
const float u = w - li;
const float dt = (1.f - v) * convert::To<float>(__ldg(dy + yi));
const float db = v * convert::To<float>(__ldg(dy + yi));
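// Backward pass: atomically scatter the weighted gradient to the four input pixels that contributed.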
math::utils::AtomicAdd(&dx[(offset + ti) * W + li], (1.f - u) * dt);
math::utils::AtomicAdd(&dx[(offset + ti) * W + ri], u * dt);
math::utils::AtomicAdd(&dx[(offset + bi) * W + li], (1.f - u) * db);
math::utils::AtomicAdd(&dx[(offset + bi) * W + ri], u * db);
}
}
template <typename T>
__global__ void _ResizeLinear2dGradNHWC(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* dy,
float* dx) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int c = yi % C;
const int w_out = (yi / C) % out_w;
const int h_out = (yi / C / out_w) % out_h;
const int n = yi / C / out_w / out_h;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = n * H * W * C + c;
const float v = h - ti;
const float u = w - li;
const float dt = (1.f - v) * convert::To<float>(__ldg(dy + yi));
const float db = v * convert::To<float>(__ldg(dy + yi));
math::utils::AtomicAdd(&dx[offset + (ti * W + li) * C], (1.f - u) * dt);
math::utils::AtomicAdd(&dx[offset + (ti * W + ri) * C], u * dt);
math::utils::AtomicAdd(&dx[offset + (bi * W + li) * C], (1.f - u) * db);
math::utils::AtomicAdd(&dx[offset + (bi * W + ri) * C], u * db);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DISPATCH_RESIZE_KERNEL(name, T, kBlocks, kThreads, ...) \
if (data_format == "NCHW") { \
hipLaunchKernelGGL(( name##NCHW), dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \
} else if (data_format == "NHWC") { \
hipLaunchKernelGGL(( name##NHWC), dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \
} else { \
LOG(FATAL) << "Unknown DataFormat: " << data_format; \
}
#define DEFINE_KERNEL_LAUNCHER(name, kBackward, InputT, OutputT) \
template <> \
void name<InputT, CUDAContext>( \
const int N, \
const int C, \
const int H, \
const int W, \
const int out_h, \
const int out_w, \
const bool align_corners, \
const string& data_format, \
const InputT* x, \
OutputT* y, \
CUDAContext* ctx) { \
auto nthreads = N * C * out_h * out_w; \
if (kBackward) { \
math::Set(N* C* H* W, convert::To<OutputT>(0.f), y, ctx); \
} \
DISPATCH_RESIZE_KERNEL( \
_##name, \
math::ScalarType<InputT>::type, \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
nthreads, \
C, \
H, \
W, \
out_h, \
out_w, \
ComputeScale(H, out_h, align_corners), \
ComputeScale(W, out_w, align_corners), \
align_corners, \
reinterpret_cast<const math::ScalarType<InputT>::type*>(x), \
reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \
}
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, uint8_t, uint8_t);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, int8_t, int8_t);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, int, int);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, int64_t, int64_t);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, float16, float16);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, float, float);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, double, double);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2dGrad, true, float16, float); // Grad
DEFINE_KERNEL_LAUNCHER(ResizeLinear2dGrad, true, float, float); // Grad
DEFINE_KERNEL_LAUNCHER(ResizeLinear2dGrad, true, double, float); // Grad
#undef DEFINE_KERNEL_LAUNCHER
#undef DISPATCH_RESIZE_KERNEL
} // namespace kernels
} // namespace dragon
#endif // USE_ROCM
|
165a686a4aa92cdadb53405ec929534d07870680.cu
|
#ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T>
float ComputeScale(T in_size, T out_size, bool align_corners) {
if (align_corners) {
return (float)(in_size - T(1)) / (float)(out_size - T(1));
} else {
return (float)in_size / (float)out_size;
}
}
template <typename T>
__device__ float TransformCoordinate(
const T coord_resized,
const float scale,
const bool align_corners) {
if (align_corners) {
return coord_resized * scale;
} else {
float coord_original = (coord_resized + 0.5f) * scale - 0.5f;
return coord_original < 0.f ? 0.f : coord_original;
}
}
template <typename T>
__global__ void _ResizeLinear2dNCHW(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int w_out = yi % out_w;
const int h_out = (yi / out_w) % out_h;
const int c = (yi / out_w / out_h) % C;
const int n = yi / out_w / out_h / C;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
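// Gather the four nearest input pixels and blend them bilinearly.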
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = (n * C + c) * H;
const float tl = convert::To<float>(__ldg(x + (offset + ti) * W + li));
const float tr = convert::To<float>(__ldg(x + (offset + ti) * W + ri));
const float bl = convert::To<float>(__ldg(x + (offset + bi) * W + li));
const float br = convert::To<float>(__ldg(x + (offset + bi) * W + ri));
const float v = h - ti;
const float u = w - li;
const float t = tl + (tr - tl) * u;
const float b = bl + (br - bl) * u;
y[yi] = convert::To<T>(t + (b - t) * v);
}
}
template <typename T>
__global__ void _ResizeLinear2dNHWC(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int c = yi % C;
const int w_out = (yi / C) % out_w;
const int h_out = (yi / C / out_w) % out_h;
const int n = yi / C / out_w / out_h;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = n * H * W * C + c;
const float tl = convert::To<float>(__ldg(x + offset + (ti * W + li) * C));
const float tr = convert::To<float>(__ldg(x + offset + (ti * W + ri) * C));
const float bl = convert::To<float>(__ldg(x + offset + (bi * W + li) * C));
const float br = convert::To<float>(__ldg(x + offset + (bi * W + ri) * C));
const float v = h - ti;
const float u = w - li;
const float t = tl + (tr - tl) * u;
const float b = bl + (br - bl) * u;
y[yi] = convert::To<T>(t + (b - t) * v);
}
}
template <typename T>
__global__ void _ResizeLinear2dGradNCHW(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* dy,
float* dx) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int w_out = yi % out_w;
const int h_out = (yi / out_w) % out_h;
const int c = (yi / out_w / out_h) % C;
const int n = yi / out_w / out_h / C;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = (n * C + c) * H;
const float v = h - ti;
const float u = w - li;
const float dt = (1.f - v) * convert::To<float>(__ldg(dy + yi));
const float db = v * convert::To<float>(__ldg(dy + yi));
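// Backward pass: atomically scatter the weighted gradient to the four input pixels that contributed.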
math::utils::AtomicAdd(&dx[(offset + ti) * W + li], (1.f - u) * dt);
math::utils::AtomicAdd(&dx[(offset + ti) * W + ri], u * dt);
math::utils::AtomicAdd(&dx[(offset + bi) * W + li], (1.f - u) * db);
math::utils::AtomicAdd(&dx[(offset + bi) * W + ri], u * db);
}
}
template <typename T>
__global__ void _ResizeLinear2dGradNHWC(
const int nthreads,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const bool align_corners,
const T* dy,
float* dx) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int c = yi % C;
const int w_out = (yi / C) % out_w;
const int h_out = (yi / C / out_w) % out_h;
const int n = yi / C / out_w / out_h;
const float h = TransformCoordinate(h_out, scale_h, align_corners);
const float w = TransformCoordinate(w_out, scale_w, align_corners);
const int ti = floorf(h);
const int li = floorf(w);
const int bi = h < H - 1 ? ceilf(h) : H - 1;
const int ri = w < W - 1 ? ceilf(w) : W - 1;
const int offset = n * H * W * C + c;
const float v = h - ti;
const float u = w - li;
const float dt = (1.f - v) * convert::To<float>(__ldg(dy + yi));
const float db = v * convert::To<float>(__ldg(dy + yi));
math::utils::AtomicAdd(&dx[offset + (ti * W + li) * C], (1.f - u) * dt);
math::utils::AtomicAdd(&dx[offset + (ti * W + ri) * C], u * dt);
math::utils::AtomicAdd(&dx[offset + (bi * W + li) * C], (1.f - u) * db);
math::utils::AtomicAdd(&dx[offset + (bi * W + ri) * C], u * db);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DISPATCH_RESIZE_KERNEL(name, T, kBlocks, kThreads, ...) \
if (data_format == "NCHW") { \
name##NCHW<<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \
} else if (data_format == "NHWC") { \
name##NHWC<<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \
} else { \
LOG(FATAL) << "Unknown DataFormat: " << data_format; \
}
#define DEFINE_KERNEL_LAUNCHER(name, kBackward, InputT, OutputT) \
template <> \
void name<InputT, CUDAContext>( \
const int N, \
const int C, \
const int H, \
const int W, \
const int out_h, \
const int out_w, \
const bool align_corners, \
const string& data_format, \
const InputT* x, \
OutputT* y, \
CUDAContext* ctx) { \
auto nthreads = N * C * out_h * out_w; \
if (kBackward) { \
math::Set(N* C* H* W, convert::To<OutputT>(0.f), y, ctx); \
} \
DISPATCH_RESIZE_KERNEL( \
_##name, \
math::ScalarType<InputT>::type, \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
nthreads, \
C, \
H, \
W, \
out_h, \
out_w, \
ComputeScale(H, out_h, align_corners), \
ComputeScale(W, out_w, align_corners), \
align_corners, \
reinterpret_cast<const math::ScalarType<InputT>::type*>(x), \
reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \
}
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, uint8_t, uint8_t);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, int8_t, int8_t);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, int, int);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, int64_t, int64_t);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, float16, float16);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, float, float);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2d, false, double, double);
DEFINE_KERNEL_LAUNCHER(ResizeLinear2dGrad, true, float16, float); // Grad
DEFINE_KERNEL_LAUNCHER(ResizeLinear2dGrad, true, float, float); // Grad
DEFINE_KERNEL_LAUNCHER(ResizeLinear2dGrad, true, double, float); // Grad
#undef DEFINE_KERNEL_LAUNCHER
#undef DISPATCH_RESIZE_KERNEL
} // namespace kernels
} // namespace dragon
#endif // USE_CUDA
|
deccadefae78f475d00a2b3b63735b7f8c16df5d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_image.h"
#include "kernel.h"
#include <stdio.h>
#include <time.h>
#include <string>
#define C2IDX(i, j, w) (( i ) * ( w ) + ( j ))
const bool saveImages = true;
const size_t channels = 3;
// to avoid hassle: not a plain variable or pointer, but an array of length one
__constant__ unsigned char gdivisor[1];
__constant__ unsigned char goffset[1];
unsigned char divisor = 1;
unsigned char offset = 0;
const size_t threadsX = 32, threadsY = 8;
unsigned char FILTER5_CPU[] = {0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0 };
const size_t FILTER5_SIZE = sizeof(FILTER5_CPU) / sizeof(unsigned char);
__constant__ unsigned char FILTER5_GPU[FILTER5_SIZE];
unsigned char FILTER3_CPU[] = { 0, 0, 0,
0, 1, 0,
0, 0, 0 };
const size_t FILTER3_SIZE = sizeof(FILTER3_CPU) / sizeof(unsigned char);
__constant__ unsigned char FILTER3_GPU[FILTER3_SIZE];
// compare the GPU result with the CPU result
size_t verify(const unsigned char * a, const unsigned char * gold, const size_t len) {
for (size_t i = 0; i < len; ++i) if (a[i] != gold[i]) return i;
return -1;
}
void expandBoundaries(unsigned char *dst, const unsigned char *src, const size_t w, const size_t h, const size_t we, const size_t he) {
const size_t halfDim = (we - w) / 2;
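// Copy the source image into the enlarged buffer, replicating edge pixels into the added border.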
for (size_t ip = 0, i = 0; ip < he && i < h; ip < halfDim || ip >= he - 2*halfDim ? i : ++i, ++ip)
for (size_t jp = 0, j = 0; jp < we && j < w; jp < halfDim || jp >= we - 2*halfDim ? j : ++j, ++jp)
for (size_t k = 0; k < channels; ++k)
dst[C2IDX(ip, jp * channels + k, we * channels)] = src[C2IDX(i, j * channels + k, w * channels)];
}
unsigned char applyFilterCPU(unsigned char *seq, const unsigned char *filter, const size_t size) {
unsigned int result = 0;
for (size_t i = 0; i < size; ++i)
result += seq[i] * filter[i];
result = result / divisor + offset;
return (unsigned char) result;
}
void filter3CPU(unsigned char *dst, const unsigned char *src, const size_t w, const size_t h) {
const size_t we = w + 2;
for (size_t i = 0, ip = 1; i < h; ++i, ++ip) {
for (size_t j = 0, jp = 1; j < w; ++j, ++jp) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[FILTER3_SIZE] = {
src[C2IDX(ip - 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip - 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip - 1, (jp + 1) * channels + c, we * channels)],
src[C2IDX(ip + 0, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 0, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 0, (jp + 1) * channels + c, we * channels)],
src[C2IDX(ip + 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 1, (jp + 1) * channels + c, we * channels)]
};
dst[C2IDX(i, j * channels + c, w * channels)] = applyFilterCPU(temp, FILTER3_CPU, FILTER3_SIZE);
}
}
}
}
void filter5CPU(unsigned char *dst, const unsigned char *src, const size_t w, const size_t h) {
const size_t we = w + 4;
for (size_t i = 0, ip = 2; i < h; ++i, ++ip) {
for (size_t j = 0, jp = 2; j < w; ++j, ++jp) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[FILTER5_SIZE] = {
src[C2IDX(ip - 2, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip - 2, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip - 2, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip - 2, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip - 2, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip - 1, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip - 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip - 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip - 1, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip - 1, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip + 0, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip + 0, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 0, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 0, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip + 0, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip + 1, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip + 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 1, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip + 1, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip + 2, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip + 2, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 2, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 2, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip + 2, (jp + 2) * channels + c, we * channels)]
};
dst[C2IDX(i, j * channels + c, w * channels)] = applyFilterCPU(temp, FILTER5_CPU, FILTER5_SIZE);
}
}
}
}
__device__ unsigned char applyFilterGPU(unsigned char* seq, const unsigned char* filter, const size_t size) {
unsigned int result = 0;
for (size_t i = 0; i < size; ++i)
result += seq[i] * filter[i];
result = result / gdivisor[0] + goffset[0];
return (unsigned char)result;
}
__global__ void filter3GPU(int *dst, const int *src, const size_t pitchDst, const size_t pitchSrc, const size_t h) {
__shared__ int smemIn[threadsY + 2][(threadsX)* channels + 2];
__shared__ int smemOut[threadsY][(threadsX)* channels];
size_t Y = blockIdx.y * blockDim.y + threadIdx.y;
size_t X = (blockIdx.x * blockDim.x) * channels + threadIdx.x;
if (X < pitchSrc && Y < h + 2) {
// load the image
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y][threadIdx.x + threadsX * i] = src[C2IDX(Y, X + threadsX * i, pitchSrc)];
// the 2 extra columns on the right
if (threadIdx.x <= 1)
smemIn[threadIdx.y][threadIdx.x + threadsX * channels] = src[C2IDX(Y, X + threadsX * channels, pitchSrc)];
// and the remaining 2 border rows at the bottom
if (threadIdx.y >= blockDim.y - 2 && Y + 2 < h + 2) {
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y + 2][threadIdx.x + threadsX * i] = src[C2IDX(Y + 2, X + threadsX * i, pitchSrc)];
// the 2 extra columns on the right
if (threadIdx.x < 2)
smemIn[threadIdx.y + 2][threadIdx.x + threadsX * channels] = src[C2IDX(Y + 2, X + threadsX * channels, pitchSrc)];
}
}
// synchronize the threads to guarantee
// that all data has been written to shared memory
__syncthreads();
// 4 pixels wide
// + 1 on each side
unsigned char data[3][6 * channels];
for (size_t i = 0; i < 3; ++i) {
memcpy(data[i], &smemIn[threadIdx.y + i][threadIdx.x * channels], 6 * channels);
}
// the optimal read is 4 bytes, while each pixel is 3 bytes
// so we choose 4 * 3
char result[4 * channels] = { 0 };
for (size_t i = 0; i < 4; ++i) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[9] = {
data[0][(i + 0) * channels + c], data[0][(i + 1) * channels + c], data[0][(i + 2) * channels + c],
data[1][(i + 0) * channels + c], data[1][(i + 1) * channels + c], data[1][(i + 2) * channels + c],
data[2][(i + 0) * channels + c], data[2][(i + 1) * channels + c], data[2][(i + 2) * channels + c]
};
result[i * channels + c] = applyFilterGPU(temp, FILTER3_GPU, FILTER3_SIZE);
}
}
memcpy(&smemOut[threadIdx.y][threadIdx.x * channels], result, 4 * channels);
if (X < pitchDst && Y < h)
for (size_t i = 0; i < channels; ++i)
dst[C2IDX(Y, X + threadsX * i, pitchDst)] = smemOut[threadIdx.y][threadsX * i + threadIdx.x];
}
__global__ void filter5GPU(int *dst, const int *src, const size_t pitchDst, const size_t pitchSrc, const size_t h) {
__shared__ int smemIn[threadsY + 4][(threadsX)* channels + 4];
__shared__ int smemOut[threadsY][(threadsX)* channels];
size_t Y = blockIdx.y * blockDim.y + threadIdx.y;
size_t X = (blockIdx.x * blockDim.x) * channels + threadIdx.x;
if (X < pitchSrc && Y < h + 4) {
// load the image
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y][threadIdx.x + threadsX * i] = src[C2IDX(Y, X + threadsX * i, pitchSrc)];
// the 4 extra columns on the right
if (threadIdx.x < 4)
smemIn[threadIdx.y][threadIdx.x + threadsX * channels] = src[C2IDX(Y, X + threadsX * channels, pitchSrc)];
// and the remaining 4 border rows at the bottom
if (threadIdx.y >= blockDim.y - 4 && Y + 4 < h + 4) {
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y + 4][threadIdx.x + threadsX * i] = src[C2IDX(Y + 4, X + threadsX * i, pitchSrc)];
// the 4 extra columns on the right
if (threadIdx.x < 4)
smemIn[threadIdx.y + 4][threadIdx.x + threadsX * channels] = src[C2IDX(Y + 4, X + threadsX * channels, pitchSrc)];
}
}
// synchronize the threads to guarantee
// that all data has been written to shared memory
__syncthreads();
// 4 pixels wide
// + 2 on each side
unsigned char data[5][8 * channels];
for (size_t i = 0; i < 5; ++i) {
memcpy(data[i], &smemIn[threadIdx.y + i][threadIdx.x * channels], 8 * channels);
}
// the optimal read is 4 bytes, while each pixel is 3 bytes
// so we choose 4 * 3
char result[4 * channels] = { 0 };
for (size_t i = 0; i < 4; ++i) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[] = {
data[0][(i + 0) * channels + c], data[0][(i + 1) * channels + c], data[0][(i + 2) * channels + c], data[0][(i + 3) * channels + c],data[0][(i + 4) * channels + c],
data[1][(i + 0) * channels + c], data[1][(i + 1) * channels + c], data[1][(i + 2) * channels + c], data[1][(i + 3) * channels + c],data[1][(i + 4) * channels + c],
data[2][(i + 0) * channels + c], data[2][(i + 1) * channels + c], data[2][(i + 2) * channels + c], data[2][(i + 3) * channels + c],data[2][(i + 4) * channels + c],
data[3][(i + 0) * channels + c], data[3][(i + 1) * channels + c], data[3][(i + 2) * channels + c], data[3][(i + 3) * channels + c],data[3][(i + 4) * channels + c],
data[4][(i + 0) * channels + c], data[4][(i + 1) * channels + c], data[4][(i + 2) * channels + c], data[4][(i + 3) * channels + c],data[4][(i + 4) * channels + c],
};
result[i * channels + c] = applyFilterGPU(temp, FILTER5_GPU, FILTER5_SIZE);
}
}
memcpy(&smemOut[threadIdx.y][threadIdx.x * channels], result, 4 * channels);
if (X < pitchDst && Y < h)
for (size_t i = 0; i < channels; ++i)
dst[C2IDX(Y, X + threadsX * i, pitchDst)] = smemOut[threadIdx.y][threadsX * i + threadIdx.x];
}
extern "C" void __declspec(dllexport) runFilter3(unsigned char* filter, unsigned char divisor_,
unsigned char offset_, const char* cFileame, const unsigned char compare)
{
std::string filename(cFileame);
unsigned char *hOrigin = NULL, *hSrc = NULL, *hDst = NULL, *hResult = NULL, *dSrc = NULL, *dDst = NULL;
unsigned int w, h, c, we, he;
size_t pitchSrc, pitchDst;
for (size_t i = 0; i < FILTER3_SIZE; ++i)
{
FILTER3_CPU[i] = filter[i];
}
divisor = divisor_;
offset = offset_;
__loadPPM(filename.c_str(), &hOrigin, &w, &h, &c);
// expand the boundaries
we = w + 2, he = h + 2;
hSrc = (unsigned char*)malloc(sizeof(unsigned char) * we * he * c);
hResult = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
expandBoundaries(hSrc, hOrigin, w, h, we, he);
int countDevice;
bool cudaIsAvailable = hipGetDeviceCount(&countDevice) == hipSuccess;
float acceleration = 0;
if (!cudaIsAvailable || compare) {
// CPU path if CUDA is not supported
if (!cudaIsAvailable) {
fprintf(stdout, "No device available for execution, processing on CPU\n");
fflush(stdout);
}
clock_t startCPU, stopCPU;
startCPU = clock();
filter3CPU(hResult, hSrc, w, h);
stopCPU = clock();
float elapsedTimeCPU = (float)(stopCPU - startCPU);
fprintf(stdout, "Elapsed CPU time: %.3f\n", elapsedTimeCPU);
fflush(stdout);
acceleration = elapsedTimeCPU;
if (saveImages) {
std::string fileNameFilteredCPU = filename;
fileNameFilteredCPU.replace(fileNameFilteredCPU.end() - 4, fileNameFilteredCPU.end(), "f_cpu.ppm");
__savePPM(fileNameFilteredCPU.c_str(), hResult, w, h, c);
}
}
if (cudaIsAvailable) {
// GPU
hipMemcpyToSymbol(FILTER3_GPU, FILTER3_CPU, sizeof(FILTER3_CPU));
hipMemcpyToSymbol(gdivisor, &divisor, sizeof(divisor));
hipMemcpyToSymbol(goffset, &offset, sizeof(offset));
hDst = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
memset(hDst, 0, w * h * c);
hipMallocPitch(&dSrc, &pitchSrc, we * c, he);
hipMallocPitch(&dDst, &pitchDst, w * c, h);
hipMemset2D(dDst, pitchDst, 0, w * c, h);
hipMemcpy2D(dSrc, pitchSrc, hSrc, we * c, we * c, he, hipMemcpyHostToDevice);
dim3 dimBlock(threadsX, threadsY);
// round up
dim3 dimGrid((w + 127) / 128, (h + 7) / 8);
hipEvent_t startGPU, stopGPU;
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
hipEventRecord(startGPU, 0);
hipLaunchKernelGGL(( filter3GPU) , dim3(dimGrid), dim3(dimBlock) , 0, 0, (int*)dDst, (const int*)dSrc, pitchDst >> 2, pitchSrc >> 2, h);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
float elapsedTimeGPU;
hipEventElapsedTime(&elapsedTimeGPU, startGPU, stopGPU);
fprintf(stdout, "Elapsed GPU time: %.3f\n", elapsedTimeGPU);
fflush(stdout);
hipEventDestroy(startGPU);
hipEventDestroy(stopGPU);
acceleration = acceleration / elapsedTimeGPU;
hipMemcpy2D(hDst, w * c, dDst, pitchDst, w * c, h, hipMemcpyDeviceToHost);
if (saveImages) {
std::string fileNameFilteredGPU = filename;
fileNameFilteredGPU.replace(fileNameFilteredGPU.end() - 4, fileNameFilteredGPU.end(), "f_gpu.ppm");
__savePPM(fileNameFilteredGPU.c_str(), hDst, w, h, c);
}
hipFree(dSrc);
hipFree(dDst);
hipDeviceReset();
}
if (compare && cudaIsAvailable) {
size_t errorIdx = verify(hDst, hResult, w * h * c);
if (errorIdx != -1) fprintf(stderr, "Error at %zu\n", errorIdx);
else fprintf(stdout, "Verified!\nAcceleration: %.3f\n", acceleration);
fflush(stdout);
}
if (hDst != NULL) free(hDst);
if (hResult != NULL) free(hResult);
if (hSrc != NULL) free(hSrc);
if (hOrigin != NULL) free(hOrigin);
}
extern "C" void __declspec(dllexport) runFilter5(unsigned char* filter, unsigned char divisor_,
unsigned char offset_, const char* cFileame, const unsigned char compare)
{
std::string filename(cFileame);
unsigned char *hOrigin = NULL, *hSrc = NULL, *hDst = NULL, *hResult = NULL, *dSrc = NULL, *dDst = NULL;
unsigned int w, h, c, we, he;
size_t pitchSrc, pitchDst;
for (size_t i = 0; i < FILTER5_SIZE; ++i)
{
FILTER5_CPU[i] = filter[i];
}
divisor = divisor_;
offset = offset_;
__loadPPM(filename.c_str(), &hOrigin, &w, &h, &c);
// Expand the boundaries
we = w + 4, he = h + 4;
hSrc = (unsigned char*)malloc(sizeof(unsigned char) * we * he * c);
hResult = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
expandBoundaries(hSrc, hOrigin, w, h, we, he);
int countDevice;
bool cudaIsAvailable = hipGetDeviceCount(&countDevice) == hipSuccess;
float acceleration = 0;
if (!cudaIsAvailable || compare) {
// CPU path if CUDA is not supported
if (countDevice == 0) {
fprintf(stdout, "No device available for execution, processing on CPU\n");
fflush(stdout);
}
clock_t startCPU, stopCPU;
startCPU = clock();
filter5CPU(hResult, hSrc, w, h);
stopCPU = clock();
float elapsedTimeCPU = (float)(stopCPU - startCPU);
fprintf(stdout, "Elapsed CPU time: %.3f\n", elapsedTimeCPU);
fflush(stdout);
acceleration = elapsedTimeCPU;
if (saveImages) {
std::string fileNameFilteredCPU = filename;
fileNameFilteredCPU.replace(fileNameFilteredCPU.end() - 4, fileNameFilteredCPU.end(), "f_cpu.ppm");
__savePPM(fileNameFilteredCPU.c_str(), hResult, w, h, c);
}
}
if (cudaIsAvailable) {
// GPU
hipMemcpyToSymbol(FILTER5_GPU, FILTER5_CPU, sizeof(FILTER5_CPU));
hipMemcpyToSymbol(gdivisor, &divisor, sizeof(divisor));
hipMemcpyToSymbol(goffset, &offset, sizeof(offset));
hDst = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
memset(hDst, 0, w * h * c);
hipMallocPitch(&dSrc, &pitchSrc, we * c, he);
hipMallocPitch(&dDst, &pitchDst, w * c, h);
hipMemset2D(dDst, pitchDst, 0, w * c, h);
hipMemcpy2D(dSrc, pitchSrc, hSrc, we * c, we * c, he, hipMemcpyHostToDevice);
dim3 dimBlock(threadsX, threadsY);
// round up
dim3 dimGrid((w + 127) / 128, (h + 7) / 8);
hipEvent_t startGPU, stopGPU;
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
hipEventRecord(startGPU, 0);
hipLaunchKernelGGL(( filter5GPU) , dim3(dimGrid), dim3(dimBlock) , 0, 0, (int*)dDst, (const int*)dSrc, pitchDst >> 2, pitchSrc >> 2, h);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
float elapsedTimeGPU;
hipEventElapsedTime(&elapsedTimeGPU, startGPU, stopGPU);
fprintf(stdout, "Elapsed GPU time: %.3f\n", elapsedTimeGPU);
fflush(stdout);
hipEventDestroy(startGPU);
hipEventDestroy(stopGPU);
acceleration = acceleration / elapsedTimeGPU;
hipMemcpy2D(hDst, w * c, dDst, pitchDst, w * c, h, hipMemcpyDeviceToHost);
if (saveImages) {
std::string fileNameFilteredGPU = filename;
fileNameFilteredGPU.replace(fileNameFilteredGPU.end() - 4, fileNameFilteredGPU.end(), "f_gpu.ppm");
__savePPM(fileNameFilteredGPU.c_str(), hDst, w, h, c);
}
hipFree(dSrc);
hipFree(dDst);
hipDeviceReset();
}
if (compare && cudaIsAvailable) {
size_t errorIdx = verify(hDst, hResult, w * h * c);
if (errorIdx != -1) fprintf(stderr, "Error at %zu\n", errorIdx);
else fprintf(stdout, "Verified!\nAcceleration: %.3f\n", acceleration);
fflush(stdout);
}
if (hDst != NULL) free(hDst);
if (hResult != NULL) free(hResult);
if (hSrc != NULL) free(hSrc);
if (hOrigin != NULL) free(hOrigin);
}
// for testing
//int main()
//{
// std::string filename = "D:\\Univer\\AVS\\ImageProcessing\\world.ppm";
// //std::cin >> filename;
// /*unsigned char arr[FILTER3_SIZE] = { 0, 0, 0,
// 0, -1, 0,
// 0, 0, 0 };*/
// unsigned char arr[FILTER5_SIZE] = { 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, -1, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0 };
// int input;
// unsigned char divisor_, offset_;
// std::cout << "Enter 5*5 filter: " << std::endl;
// /*for (size_t i = 0; i < FILTER5_SIZE; ++i) {
// std::cin >> input;
// arr[i] = input;
// }*/
// std::cout << "Enter div: " << std::endl;
// std::cin >> input;
// divisor_ = input;
// std::cout << "Enter offset: " << std::endl;
// std::cin >> input;
// offset_ = input;
// runFilter5(arr, divisor_, offset_, filename.c_str(), 1);
// std::fflush(stdout);
//}
|
deccadefae78f475d00a2b3b63735b7f8c16df5d.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_image.h"
#include "kernel.h"
#include <stdio.h>
#include <time.h>
#include <string>
#define C2IDX(i, j, w) (( i ) * ( w ) + ( j ))
const bool saveImages = true;
const size_t channels = 3;
// to avoid hassle: not a plain variable or pointer, but an array of length one
__constant__ unsigned char gdivisor[1];
__constant__ unsigned char goffset[1];
unsigned char divisor = 1;
unsigned char offset = 0;
const size_t threadsX = 32, threadsY = 8;
unsigned char FILTER5_CPU[] = {0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0 };
const size_t FILTER5_SIZE = sizeof(FILTER5_CPU) / sizeof(unsigned char);
__constant__ unsigned char FILTER5_GPU[FILTER5_SIZE];
unsigned char FILTER3_CPU[] = { 0, 0, 0,
0, 1, 0,
0, 0, 0 };
const size_t FILTER3_SIZE = sizeof(FILTER3_CPU) / sizeof(unsigned char);
__constant__ unsigned char FILTER3_GPU[FILTER3_SIZE];
// compare the GPU result with the CPU result
size_t verify(const unsigned char * a, const unsigned char * gold, const size_t len) {
for (size_t i = 0; i < len; ++i) if (a[i] != gold[i]) return i;
return -1;
}
void expandBoundaries(unsigned char *dst, const unsigned char *src, const size_t w, const size_t h, const size_t we, const size_t he) {
const size_t halfDim = (we - w) / 2;
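// Copy the source image into the enlarged buffer, replicating edge pixels into the added border.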
for (size_t ip = 0, i = 0; ip < he && i < h; ip < halfDim || ip >= he - 2*halfDim ? i : ++i, ++ip)
for (size_t jp = 0, j = 0; jp < we && j < w; jp < halfDim || jp >= we - 2*halfDim ? j : ++j, ++jp)
for (size_t k = 0; k < channels; ++k)
dst[C2IDX(ip, jp * channels + k, we * channels)] = src[C2IDX(i, j * channels + k, w * channels)];
}
unsigned char applyFilterCPU(unsigned char *seq, const unsigned char *filter, const size_t size) {
unsigned int result = 0;
for (size_t i = 0; i < size; ++i)
result += seq[i] * filter[i];
result = result / divisor + offset;
return (unsigned char) result;
}
void filter3CPU(unsigned char *dst, const unsigned char *src, const size_t w, const size_t h) {
const size_t we = w + 2;
for (size_t i = 0, ip = 1; i < h; ++i, ++ip) {
for (size_t j = 0, jp = 1; j < w; ++j, ++jp) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[FILTER3_SIZE] = {
src[C2IDX(ip - 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip - 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip - 1, (jp + 1) * channels + c, we * channels)],
src[C2IDX(ip + 0, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 0, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 0, (jp + 1) * channels + c, we * channels)],
src[C2IDX(ip + 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 1, (jp + 1) * channels + c, we * channels)]
};
dst[C2IDX(i, j * channels + c, w * channels)] = applyFilterCPU(temp, FILTER3_CPU, FILTER3_SIZE);
}
}
}
}
void filter5CPU(unsigned char *dst, const unsigned char *src, const size_t w, const size_t h) {
const size_t we = w + 4;
for (size_t i = 0, ip = 2; i < h; ++i, ++ip) {
for (size_t j = 0, jp = 2; j < w; ++j, ++jp) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[FILTER5_SIZE] = {
src[C2IDX(ip - 2, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip - 2, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip - 2, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip - 2, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip - 2, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip - 1, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip - 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip - 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip - 1, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip - 1, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip + 0, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip + 0, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 0, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 0, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip + 0, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip + 1, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip + 1, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 1, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 1, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip + 1, (jp + 2) * channels + c, we * channels)],
src[C2IDX(ip + 2, (jp - 2) * channels + c, we * channels)], src[C2IDX(ip + 2, (jp - 1) * channels + c, we * channels)], src[C2IDX(ip + 2, (jp + 0) * channels + c, we * channels)],src[C2IDX(ip + 2, (jp + 1) * channels + c, we * channels)],src[C2IDX(ip + 2, (jp + 2) * channels + c, we * channels)]
};
dst[C2IDX(i, j * channels + c, w * channels)] = applyFilterCPU(temp, FILTER5_CPU, FILTER5_SIZE);
}
}
}
}
__device__ unsigned char applyFilterGPU(unsigned char* seq, const unsigned char* filter, const size_t size) {
unsigned int result = 0;
for (size_t i = 0; i < size; ++i)
result += seq[i] * filter[i];
result = result / gdivisor[0] + goffset[0];
return (unsigned char)result;
}
__global__ void filter3GPU(int *dst, const int *src, const size_t pitchDst, const size_t pitchSrc, const size_t h) {
__shared__ int smemIn[threadsY + 2][(threadsX)* channels + 2];
__shared__ int smemOut[threadsY][(threadsX)* channels];
size_t Y = blockIdx.y * blockDim.y + threadIdx.y;
size_t X = (blockIdx.x * blockDim.x) * channels + threadIdx.x;
if (X < pitchSrc && Y < h + 2) {
// load the image
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y][threadIdx.x + threadsX * i] = src[C2IDX(Y, X + threadsX * i, pitchSrc)];
// the 2 extra columns on the right
if (threadIdx.x <= 1)
smemIn[threadIdx.y][threadIdx.x + threadsX * channels] = src[C2IDX(Y, X + threadsX * channels, pitchSrc)];
// and the remaining 2 border rows at the bottom
if (threadIdx.y >= blockDim.y - 2 && Y + 2 < h + 2) {
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y + 2][threadIdx.x + threadsX * i] = src[C2IDX(Y + 2, X + threadsX * i, pitchSrc)];
// the 2 extra columns on the right
if (threadIdx.x < 2)
smemIn[threadIdx.y + 2][threadIdx.x + threadsX * channels] = src[C2IDX(Y + 2, X + threadsX * channels, pitchSrc)];
}
}
// synchronize the threads to guarantee
// that all data has been written to shared memory
__syncthreads();
// 4 pixels wide
// + 1 on each side
unsigned char data[3][6 * channels];
for (size_t i = 0; i < 3; ++i) {
memcpy(data[i], &smemIn[threadIdx.y + i][threadIdx.x * channels], 6 * channels);
}
// the optimal read is 4 bytes, while each pixel is 3 bytes
// so we choose 4 * 3
char result[4 * channels] = { 0 };
for (size_t i = 0; i < 4; ++i) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[9] = {
data[0][(i + 0) * channels + c], data[0][(i + 1) * channels + c], data[0][(i + 2) * channels + c],
data[1][(i + 0) * channels + c], data[1][(i + 1) * channels + c], data[1][(i + 2) * channels + c],
data[2][(i + 0) * channels + c], data[2][(i + 1) * channels + c], data[2][(i + 2) * channels + c]
};
result[i * channels + c] = applyFilterGPU(temp, FILTER3_GPU, FILTER3_SIZE);
}
}
memcpy(&smemOut[threadIdx.y][threadIdx.x * channels], result, 4 * channels);
if (X < pitchDst && Y < h)
for (size_t i = 0; i < channels; ++i)
dst[C2IDX(Y, X + threadsX * i, pitchDst)] = smemOut[threadIdx.y][threadsX * i + threadIdx.x];
}
__global__ void filter5GPU(int *dst, const int *src, const size_t pitchDst, const size_t pitchSrc, const size_t h) {
__shared__ int smemIn[threadsY + 4][(threadsX)* channels + 4];
__shared__ int smemOut[threadsY][(threadsX)* channels];
size_t Y = blockIdx.y * blockDim.y + threadIdx.y;
size_t X = (blockIdx.x * blockDim.x) * channels + threadIdx.x;
if (X < pitchSrc && Y < h + 4) {
// load the image
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y][threadIdx.x + threadsX * i] = src[C2IDX(Y, X + threadsX * i, pitchSrc)];
// the 4 extra columns on the right
if (threadIdx.x < 4)
smemIn[threadIdx.y][threadIdx.x + threadsX * channels] = src[C2IDX(Y, X + threadsX * channels, pitchSrc)];
// and the remaining 4 border rows at the bottom
if (threadIdx.y >= blockDim.y - 4 && Y + 4 < h + 4) {
for (size_t i = 0; i < channels; ++i)
smemIn[threadIdx.y + 4][threadIdx.x + threadsX * i] = src[C2IDX(Y + 4, X + threadsX * i, pitchSrc)];
// the 4 extra columns on the right
if (threadIdx.x < 4)
smemIn[threadIdx.y + 4][threadIdx.x + threadsX * channels] = src[C2IDX(Y + 4, X + threadsX * channels, pitchSrc)];
}
}
// synchronize the threads to guarantee
// that all data has been written to shared memory
__syncthreads();
// 4 pixels wide
// + 2 on each side
unsigned char data[5][8 * channels];
for (size_t i = 0; i < 5; ++i) {
memcpy(data[i], &smemIn[threadIdx.y + i][threadIdx.x * channels], 8 * channels);
}
// the optimal read is 4 bytes, while each pixel is 3 bytes
// so we choose 4 * 3
char result[4 * channels] = { 0 };
for (size_t i = 0; i < 4; ++i) {
for (size_t c = 0; c < channels; ++c) {
unsigned char temp[] = {
data[0][(i + 0) * channels + c], data[0][(i + 1) * channels + c], data[0][(i + 2) * channels + c], data[0][(i + 3) * channels + c],data[0][(i + 4) * channels + c],
data[1][(i + 0) * channels + c], data[1][(i + 1) * channels + c], data[1][(i + 2) * channels + c], data[1][(i + 3) * channels + c],data[1][(i + 4) * channels + c],
data[2][(i + 0) * channels + c], data[2][(i + 1) * channels + c], data[2][(i + 2) * channels + c], data[2][(i + 3) * channels + c],data[2][(i + 4) * channels + c],
data[3][(i + 0) * channels + c], data[3][(i + 1) * channels + c], data[3][(i + 2) * channels + c], data[3][(i + 3) * channels + c],data[3][(i + 4) * channels + c],
data[4][(i + 0) * channels + c], data[4][(i + 1) * channels + c], data[4][(i + 2) * channels + c], data[4][(i + 3) * channels + c],data[4][(i + 4) * channels + c],
};
result[i * channels + c] = applyFilterGPU(temp, FILTER5_GPU, FILTER5_SIZE);
}
}
memcpy(&smemOut[threadIdx.y][threadIdx.x * channels], result, 4 * channels);
if (X < pitchDst && Y < h)
for (size_t i = 0; i < channels; ++i)
dst[C2IDX(Y, X + threadsX * i, pitchDst)] = smemOut[threadIdx.y][threadsX * i + threadIdx.x];
}
extern "C" void __declspec(dllexport) runFilter3(unsigned char* filter, unsigned char divisor_,
unsigned char offset_, const char* cFileame, const unsigned char compare)
{
std::string filename(cFileame);
unsigned char *hOrigin = NULL, *hSrc = NULL, *hDst = NULL, *hResult = NULL, *dSrc = NULL, *dDst = NULL;
unsigned int w, h, c, we, he;
size_t pitchSrc, pitchDst;
for (size_t i = 0; i < FILTER3_SIZE; ++i)
{
FILTER3_CPU[i] = filter[i];
}
divisor = divisor_;
offset = offset_;
__loadPPM(filename.c_str(), &hOrigin, &w, &h, &c);
// expand the boundaries
we = w + 2, he = h + 2;
hSrc = (unsigned char*)malloc(sizeof(unsigned char) * we * he * c);
hResult = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
expandBoundaries(hSrc, hOrigin, w, h, we, he);
int countDevice;
bool cudaIsAvailable = cudaGetDeviceCount(&countDevice) == cudaSuccess;
float acceleration = 0;
if (!cudaIsAvailable || compare) {
// CPU path if CUDA is not supported
if (!cudaIsAvailable) {
fprintf(stdout, "No device available for execution, processing on CPU\n");
fflush(stdout);
}
clock_t startCPU, stopCPU;
startCPU = clock();
filter3CPU(hResult, hSrc, w, h);
stopCPU = clock();
float elapsedTimeCPU = (float)(stopCPU - startCPU);
fprintf(stdout, "Elapsed CPU time: %.3f\n", elapsedTimeCPU);
fflush(stdout);
acceleration = elapsedTimeCPU;
if (saveImages) {
std::string fileNameFilteredCPU = filename;
fileNameFilteredCPU.replace(fileNameFilteredCPU.end() - 4, fileNameFilteredCPU.end(), "f_cpu.ppm");
__savePPM(fileNameFilteredCPU.c_str(), hResult, w, h, c);
}
}
if (cudaIsAvailable) {
// GPU
cudaMemcpyToSymbol(FILTER3_GPU, FILTER3_CPU, sizeof(FILTER3_CPU));
cudaMemcpyToSymbol(gdivisor, &divisor, sizeof(divisor));
cudaMemcpyToSymbol(goffset, &offset, sizeof(offset));
hDst = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
memset(hDst, 0, w * h * c);
cudaMallocPitch(&dSrc, &pitchSrc, we * c, he);
cudaMallocPitch(&dDst, &pitchDst, w * c, h);
cudaMemset2D(dDst, pitchDst, 0, w * c, h);
cudaMemcpy2D(dSrc, pitchSrc, hSrc, we * c, we * c, he, cudaMemcpyHostToDevice);
dim3 dimBlock(threadsX, threadsY);
// round up
dim3 dimGrid((w + 127) / 128, (h + 7) / 8);
cudaEvent_t startGPU, stopGPU;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
cudaEventRecord(startGPU, 0);
filter3GPU <<< dimGrid, dimBlock >>> ((int*)dDst, (const int*)dSrc, pitchDst >> 2, pitchSrc >> 2, h);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
float elapsedTimeGPU;
cudaEventElapsedTime(&elapsedTimeGPU, startGPU, stopGPU);
fprintf(stdout, "Elapsed GPU time: %.3f\n", elapsedTimeGPU);
fflush(stdout);
cudaEventDestroy(startGPU);
cudaEventDestroy(stopGPU);
acceleration = acceleration / elapsedTimeGPU;
cudaMemcpy2D(hDst, w * c, dDst, pitchDst, w * c, h, cudaMemcpyDeviceToHost);
if (saveImages) {
std::string fileNameFilteredGPU = filename;
fileNameFilteredGPU.replace(fileNameFilteredGPU.end() - 4, fileNameFilteredGPU.end(), "f_gpu.ppm");
__savePPM(fileNameFilteredGPU.c_str(), hDst, w, h, c);
}
cudaFree(dSrc);
cudaFree(dDst);
cudaDeviceReset();
}
if (compare && cudaIsAvailable) {
size_t errorIdx = verify(hDst, hResult, w * h * c);
if (errorIdx != -1) fprintf(stderr, "Error at %zu\n", errorIdx);
else fprintf(stdout, "Verified!\nAcceleration: %.3f\n", acceleration);
fflush(stdout);
}
if (hDst != NULL) free(hDst);
if (hResult != NULL) free(hResult);
if (hSrc != NULL) free(hSrc);
if (hOrigin != NULL) free(hOrigin);
}
extern "C" void __declspec(dllexport) runFilter5(unsigned char* filter, unsigned char divisor_,
unsigned char offset_, const char* cFileame, const unsigned char compare)
{
std::string filename(cFileame);
unsigned char *hOrigin = NULL, *hSrc = NULL, *hDst = NULL, *hResult = NULL, *dSrc = NULL, *dDst = NULL;
unsigned int w, h, c, we, he;
size_t pitchSrc, pitchDst;
for (size_t i = 0; i < FILTER5_SIZE; ++i)
{
FILTER5_CPU[i] = filter[i];
}
divisor = divisor_;
offset = offset_;
__loadPPM(filename.c_str(), &hOrigin, &w, &h, &c);
// Expand the boundaries
we = w + 4, he = h + 4;
hSrc = (unsigned char*)malloc(sizeof(unsigned char) * we * he * c);
hResult = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
expandBoundaries(hSrc, hOrigin, w, h, we, he);
int countDevice;
bool cudaIsAvailable = cudaGetDeviceCount(&countDevice) == cudaSuccess;
float acceleration = 0;
if (!cudaIsAvailable || compare) {
// CPU path if CUDA is not supported
if (countDevice == 0) {
fprintf(stdout, "No device available for execution, processing on CPU\n");
fflush(stdout);
}
clock_t startCPU, stopCPU;
startCPU = clock();
filter5CPU(hResult, hSrc, w, h);
stopCPU = clock();
float elapsedTimeCPU = (float)(stopCPU - startCPU);
fprintf(stdout, "Elapsed CPU time: %.3f\n", elapsedTimeCPU);
fflush(stdout);
acceleration = elapsedTimeCPU;
if (saveImages) {
std::string fileNameFilteredCPU = filename;
fileNameFilteredCPU.replace(fileNameFilteredCPU.end() - 4, fileNameFilteredCPU.end(), "f_cpu.ppm");
__savePPM(fileNameFilteredCPU.c_str(), hResult, w, h, c);
}
}
if (cudaIsAvailable) {
// GPU
cudaMemcpyToSymbol(FILTER5_GPU, FILTER5_CPU, sizeof(FILTER5_CPU));
cudaMemcpyToSymbol(gdivisor, &divisor, sizeof(divisor));
cudaMemcpyToSymbol(goffset, &offset, sizeof(offset));
hDst = (unsigned char*)malloc(sizeof(unsigned char) * w * h * c);
memset(hDst, 0, w * h * c);
cudaMallocPitch(&dSrc, &pitchSrc, we * c, he);
cudaMallocPitch(&dDst, &pitchDst, w * c, h);
cudaMemset2D(dDst, pitchDst, 0, w * c, h);
cudaMemcpy2D(dSrc, pitchSrc, hSrc, we * c, we * c, he, cudaMemcpyHostToDevice);
dim3 dimBlock(threadsX, threadsY);
// round up
dim3 dimGrid((w + 127) / 128, (h + 7) / 8);
cudaEvent_t startGPU, stopGPU;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
cudaEventRecord(startGPU, 0);
filter5GPU <<< dimGrid, dimBlock >>> ((int*)dDst, (const int*)dSrc, pitchDst >> 2, pitchSrc >> 2, h);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
float elapsedTimeGPU;
cudaEventElapsedTime(&elapsedTimeGPU, startGPU, stopGPU);
fprintf(stdout, "Elapsed GPU time: %.3f\n", elapsedTimeGPU);
fflush(stdout);
cudaEventDestroy(startGPU);
cudaEventDestroy(stopGPU);
acceleration = acceleration / elapsedTimeGPU;
cudaMemcpy2D(hDst, w * c, dDst, pitchDst, w * c, h, cudaMemcpyDeviceToHost);
if (saveImages) {
std::string fileNameFilteredGPU = filename;
fileNameFilteredGPU.replace(fileNameFilteredGPU.end() - 4, fileNameFilteredGPU.end(), "f_gpu.ppm");
__savePPM(fileNameFilteredGPU.c_str(), hDst, w, h, c);
}
cudaFree(dSrc);
cudaFree(dDst);
cudaDeviceReset();
}
if (compare && cudaIsAvailable) {
size_t errorIdx = verify(hDst, hResult, w * h * c);
if (errorIdx != -1) fprintf(stderr, "Error at %zu\n", errorIdx);
else fprintf(stdout, "Verified!\nAcceleration: %.3f\n", acceleration);
fflush(stdout);
}
if (hDst != NULL) free(hDst);
if (hResult != NULL) free(hResult);
if (hSrc != NULL) free(hSrc);
if (hOrigin != NULL) free(hOrigin);
}
// for testing
//int main()
//{
// std::string filename = "D:\\Univer\\AVS\\ImageProcessing\\world.ppm";
// //std::cin >> filename;
// /*unsigned char arr[FILTER3_SIZE] = { 0, 0, 0,
// 0, -1, 0,
// 0, 0, 0 };*/
// unsigned char arr[FILTER5_SIZE] = { 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, -1, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0 };
// int input;
// unsigned char divisor_, offset_;
// std::cout << "Enter 5*5 filter: " << std::endl;
// /*for (size_t i = 0; i < FILTER5_SIZE; ++i) {
// std::cin >> input;
// arr[i] = input;
// }*/
// std::cout << "Enter div: " << std::endl;
// std::cin >> input;
// divisor_ = input;
// std::cout << "Enter offset: " << std::endl;
// std::cin >> input;
// offset_ = input;
// runFilter5(arr, divisor_, offset_, filename.c_str(), 1);
// std::fflush(stdout);
//}
|
b16421f01239404d0d24c4d9b181096303923876.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mmreader.hpp"
#include <time.h>
#include <iostream>
#include <sys/time.h>
#include <unistd.h>
#include <algorithm>
#include <cfloat>
#define TILE_WIDTH 32
#define PARTIAL_ROW 150000
using namespace std;
bool nearly_equal(float a, float b, float epsilon);
bool
SCsrMatrixfromFile(struct sparse_mtx *A, const char* filePath)
{
// Check that the file format is matrix market; the only format we can read right now
// This is not a complete solution, and fails for directories with file names etc...
// TODO: Should we use boost filesystem?
std::string strPath( filePath );
if( strPath.find_last_of( '.' ) != std::string::npos )
{
std::string ext = strPath.substr( strPath.find_last_of( '.' ) + 1 );
if( ext != "mtx" )
{
std::cout << "Reading file name error" << std::endl;
return false;
}
}
else
return false;
// Read data from a file on disk into buffers
// Data is read natively as COO format with the reader
MatrixMarketReader mm_reader;
if( mm_reader.MMReadFormat(filePath) )
return false;
// JPA: Shouldn't that just be an assertion check? It seems to me that
// the user has to call clsparseHeaderfromFile before calling this function,
// otherwise the whole pCsrMatrix will be broken;
A->nrow = mm_reader.GetNumRows( );
A->ncol = mm_reader.GetNumCols( );
A->nnze = mm_reader.GetNumNonZeroes( );
A->row = (int32_t *)malloc((A->nrow + 1) * sizeof(int32_t));
A->val = (float *)malloc(A->nnze * sizeof(float));
A->col = (int32_t *)malloc(A->nnze * sizeof(int32_t));
if(A->row == NULL || A->col == NULL || A->val == NULL)
{
if(A->row == NULL)
free((void *)A->row);
if(A->col == NULL)
free((void *)A->col);
if(A->val == NULL)
free((void *)A->val);
return false;
}
// The following section of code converts the sparse format from COO to CSR
Coordinate* coords = mm_reader.GetUnsymCoordinates( );
std::sort( coords, coords + A->nnze, CoordinateCompare );
int32_t current_row = 1;
A->row[ 0 ] = 0;
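// Convert the row-sorted COO entries to CSR: row[r] holds the index where row r starts in col/val.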
for (int32_t i = 0; i < A->nnze; i++)
{
A->col[ i ] = coords[ i ].y;
A->val[ i ] = coords[ i ].val;
while( coords[ i ].x >= current_row )
A->row[ current_row++ ] = i;
}
A->row[ current_row ] = A->nnze;
while( current_row <= A->nrow )
A->row[ current_row++ ] = A->nnze;
return true;
}
void multiply_single(struct sparse_mtx *A, struct dense_mtx *B, struct dense_mtx *C)
{
C->nrow = A->nrow;
C->ncol = B->ncol;
C->val = (float *)malloc(C->nrow * C->ncol * sizeof(float));
if(C->val == NULL)
return;
memset(C->val, 0, sizeof(float) * C->nrow * C->ncol);
// TODO: Implement matrix multiplication with single thread. C=A*B
for(int32_t i = 0; i < A->nrow; i++)
{
int32_t A_col_start = A->row[i];
int32_t A_col_stop = A->row[i + 1];
for(int32_t j = A_col_start; j < A_col_stop; j++)
{
int32_t B_row = A->col[j];
for(int32_t k = 0; k < B->ncol; k++)
C->val[i * C->ncol + k] += A->val[j] * B->val[B_row * B->ncol + k];
}
}
}
__global__ void multiply_kernel(int32_t *a_row, int32_t *a_col, float *a_val, float *b_val, float *c_val,
int a_nrow, int b_ncol, int depth, int partial_row)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_c = by * blockDim.y + ty;
int row = row_c + (partial_row * depth);
int col = bx * blockDim.x + tx;
float temp = 0;
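// One output element per thread: dot the CSR row 'row' of A with column 'col' of dense B; row_c indexes the chunk-local output buffer.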
if (row < a_nrow && col < b_ncol) {
for (int i = a_row[row]; i < a_row[row+1]; i++)
temp += a_val[i] * b_val[a_col[i]*b_ncol+col];
c_val[row_c*b_ncol+col] = temp;
}
}
uint64_t GetTimeStamp() {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec*(uint64_t)1000000+tv.tv_usec;
}
int main(int argc, char **argv)
{
struct sparse_mtx A;
if(!SCsrMatrixfromFile(&A, argv[1]))
{
std::cout << "read failed." << std::endl;
return 0;
}
std::cout << "Matrix: " << argv[1] << std::endl;
struct dense_mtx B;
B.nrow = A.ncol;
B.ncol = atoi(argv[2]);
if(B.ncol < 0)
{
free(A.row);
free(A.col);
free(A.val);
std::cerr << "Invalid argument for the number of columns of B." << std::endl;
return 0;
}
B.val = (float *)malloc(sizeof(float) * B.nrow * B.ncol);
//srand((unsigned int)time(NULL));
for(int i = 0; i < B.nrow; i++)
{
for(int j = 0; j < B.ncol; j++)
{
B.val[B.ncol * i + j] = ((float)rand()/(float)(RAND_MAX)) * ((rand() % 2) ? 1.0f : -1.0f);
}
}
struct dense_mtx C1, C2;
C1.val = NULL;
C1.nrow = A.nrow;
C1.ncol = B.ncol;
C2.val = NULL;
C2.nrow = A.nrow;
C2.ncol = B.ncol;
uint64_t time_s, time_p;
std::cout << "Single Thread Computation Start" << std::endl;
uint64_t start = GetTimeStamp();
multiply_single(&A, &B, &C1);
uint64_t end = GetTimeStamp();
std::cout << "Single Thread Computation End: " << end - start << " us." << std::endl;
time_s = end - start;
int32_t *a_row;
int32_t *a_col;
float *a_val;
float *b_val;
float *c_val;
int grid_x, grid_y;
int depth;
int partial_row;
int copy_size, remaining_row;
remaining_row = C2.nrow;
partial_row = C2.nrow;
depth = 1;
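// Split C into chunks of at most PARTIAL_ROW rows so the device buffer c_val stays bounded; each chunk is computed and copied back separately.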
if (C2.nrow > PARTIAL_ROW) {
partial_row = PARTIAL_ROW;
depth = C2.nrow / partial_row;
if (C2.nrow % partial_row)
depth++;
}
grid_x = C2.ncol / TILE_WIDTH;
if (C2.ncol % TILE_WIDTH)
grid_x++;
grid_y = partial_row / TILE_WIDTH;
if (partial_row % TILE_WIDTH)
grid_y++;
dim3 dimGrid(grid_x, grid_y, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
if (hipErrorMemoryAllocation == hipMalloc(&a_row, sizeof(int32_t) * (A.nrow + 1)))
fprintf(stderr, "error1\n");
if (hipErrorMemoryAllocation == hipMalloc(&a_col, sizeof(int32_t) * A.nnze))
fprintf(stderr, "error2\n");
if (hipErrorMemoryAllocation == hipMalloc(&a_val, sizeof(float) * A.nnze))
fprintf(stderr, "error3\n");
if (hipErrorMemoryAllocation == hipMalloc(&b_val, sizeof(float) * B.nrow * B.ncol))
fprintf(stderr, "error4\n");
if (hipErrorMemoryAllocation == hipMalloc(&c_val, sizeof(float) * partial_row * C2.ncol))
fprintf(stderr, "error5\n");
if ((C2.val = (float *) malloc(sizeof(float) * C2.nrow * C2.ncol)) == NULL) {
fprintf(stderr, "malloc error at %d\n", __LINE__);
return 1;
}
memset(C2.val, 0, sizeof(float) * C2.nrow * C2.ncol);
std::cout << "Cuda Computation Start" << std::endl;
start = GetTimeStamp();
hipMemcpy(a_row, A.row, sizeof(int32_t) * (A.nrow + 1), hipMemcpyHostToDevice);
hipMemcpy(a_col, A.col, sizeof(int32_t) * A.nnze, hipMemcpyHostToDevice);
hipMemcpy(a_val, A.val, sizeof(float) * A.nnze, hipMemcpyHostToDevice);
hipMemcpy(b_val, B.val, sizeof(float) * B.nrow * B.ncol, hipMemcpyHostToDevice);
copy_size = sizeof(float) * partial_row * C2.ncol;
for (int i = 0; i < depth; i++) {
hipLaunchKernelGGL(( multiply_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, a_row, a_col, a_val, b_val, c_val,
A.nrow, B.ncol, i, partial_row);
if (remaining_row < partial_row)
copy_size = sizeof(float) * remaining_row * C2.ncol;
hipMemcpy(C2.val + (partial_row * i * C2.ncol), c_val,
copy_size, hipMemcpyDeviceToHost);
remaining_row -= partial_row;
}
end = GetTimeStamp();
std::cout << "Cuda Computation End: " << end - start << " us." << std::endl << std::endl;
time_p = end - start;
printf("Speed up is %g\n", (double) time_s / time_p);
// Verify the GPU result: compare every element of C2 against the single-threaded reference C1
bool is_correct = true;
for (int i = 0; i < C1.nrow && is_correct == true; i++)
for (int j = 0; j < C1.ncol; j++)
if (!nearly_equal(C1.val[i*C1.ncol+j], C2.val[i*C2.ncol+j], 0)) {
printf("i, j: %d %d\n", i, j);
printf("%g %g\n", C1.val[i*C1.ncol+j], C2.val[i*C2.ncol+j]);
is_correct = false;
break;
}
printf("%s\n", is_correct ? "correct" : "wrong");
free(A.row);
free(A.col);
free(A.val);
free(B.val);
if(C1.val != NULL)
free(C1.val);
if(C2.val != NULL)
free(C2.val);
hipFree(a_row);
hipFree(a_col);
hipFree(a_val);
hipFree(b_val);
hipFree(c_val);
return 0;
}
bool nearly_equal(float a, float b, float epsilon)
{
float abs_a = abs(a);
float abs_b = abs(b);
float diff = abs(a - b);
if (epsilon == 0)
epsilon = 0.00001f;
if (a == b) { // shortcut, handles infinities
return true;
} else if (a == 0 || b == 0 || diff < FLT_MIN) {
// a or b is zero or both are extremely close to it
// relative error is less meaningful here
return diff < (epsilon * FLT_MIN);
} else { // use relative error
return diff / min((abs_a + abs_b), FLT_MAX) < epsilon;
}
}
|
b16421f01239404d0d24c4d9b181096303923876.cu
|
#include "mmreader.hpp"
#include <time.h>
#include <iostream>
#include <sys/time.h>
#include <unistd.h>
#include <algorithm>
#include <cfloat>
#define TILE_WIDTH 32
#define PARTIAL_ROW 150000
using namespace std;
bool nearly_equal(float a, float b, float epsilon);
bool
SCsrMatrixfromFile(struct sparse_mtx *A, const char* filePath)
{
// Check that the file format is matrix market; the only format we can read right now
// This is not a complete solution, and fails for directories with file names etc...
// TODO: Should we use boost filesystem?
std::string strPath( filePath );
if( strPath.find_last_of( '.' ) != std::string::npos )
{
std::string ext = strPath.substr( strPath.find_last_of( '.' ) + 1 );
if( ext != "mtx" )
{
std::cout << "Reading file name error" << std::endl;
return false;
}
}
else
return false;
// Read data from a file on disk into buffers
// Data is read natively as COO format with the reader
MatrixMarketReader mm_reader;
if( mm_reader.MMReadFormat(filePath) )
return false;
// JPA: Shouldn't that just be an assertion check? It seems to me that
// the user has to call clsparseHeaderfromFile before calling this function,
// otherwise the whole pCsrMatrix will be broken;
A->nrow = mm_reader.GetNumRows( );
A->ncol = mm_reader.GetNumCols( );
A->nnze = mm_reader.GetNumNonZeroes( );
A->row = (int32_t *)malloc((A->nrow + 1) * sizeof(int32_t));
A->val = (float *)malloc(A->nnze * sizeof(float));
A->col = (int32_t *)malloc(A->nnze * sizeof(int32_t));
if(A->row == NULL || A->col == NULL || A->val == NULL)
{
if(A->row != NULL)
free((void *)A->row);
if(A->col != NULL)
free((void *)A->col);
if(A->val != NULL)
free((void *)A->val);
return false;
}
// The following section of code converts the sparse format from COO to CSR
Coordinate* coords = mm_reader.GetUnsymCoordinates( );
std::sort( coords, coords + A->nnze, CoordinateCompare );
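// Build the CSR row pointers from the sorted COO entries: row[r] receives the index of
// the first nonzero whose row is >= r, and trailing empty rows point past the last nonzero.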
int32_t current_row = 1;
A->row[ 0 ] = 0;
for (int32_t i = 0; i < A->nnze; i++)
{
A->col[ i ] = coords[ i ].y;
A->val[ i ] = coords[ i ].val;
while( coords[ i ].x >= current_row )
A->row[ current_row++ ] = i;
}
A->row[ current_row ] = A->nnze;
while( current_row <= A->nrow )
A->row[ current_row++ ] = A->nnze;
return true;
}
void multiply_single(struct sparse_mtx *A, struct dense_mtx *B, struct dense_mtx *C)
{
C->nrow = A->nrow;
C->ncol = B->ncol;
C->val = (float *)malloc(C->nrow * C->ncol * sizeof(float));
if(C->val == NULL)
return;
memset(C->val, 0, sizeof(float) * C->nrow * C->ncol);
// Single-threaded reference: C = A * B, with A stored in CSR format and B dense.
for(int32_t i = 0; i < A->nrow; i++)
{
int32_t A_col_start = A->row[i];
int32_t A_col_stop = A->row[i + 1];
for(int32_t j = A_col_start; j < A_col_stop; j++)
{
int32_t B_row = A->col[j];
for(int32_t k = 0; k < B->ncol; k++)
C->val[i * C->ncol + k] += A->val[j] * B->val[B_row * B->ncol + k];
}
}
}
__global__ void multiply_kernel(int32_t *a_row, int32_t *a_col, float *a_val, float *b_val, float *c_val,
int a_nrow, int b_ncol, int depth, int partial_row)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
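// Map this thread to one output element: row_c is the row within the current chunk
// (and within the chunk-sized buffer c_val), row is the corresponding global row of A,
// and col is the column of B and C.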
int row_c = by * blockDim.y + ty;
int row = row_c + (partial_row * depth);
int col = bx * blockDim.x + tx;
float temp = 0;
if (row < a_nrow && col < b_ncol) {
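// Walk the nonzeros of CSR row 'row' of A and accumulate A(row,k) * B(k,col).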
for (int i = a_row[row]; i < a_row[row+1]; i++)
temp += a_val[i] * b_val[a_col[i]*b_ncol+col];
c_val[row_c*b_ncol+col] = temp;
}
}
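// Wall-clock timestamp in microseconds, used to time both the CPU and the GPU paths.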
uint64_t GetTimeStamp() {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec*(uint64_t)1000000+tv.tv_usec;
}
int main(int argc, char **argv)
{
struct sparse_mtx A;
if(!SCsrMatrixfromFile(&A, argv[1]))
{
std::cout << "read failed." << std::endl;
return 0;
}
std::cout << "Matrix: " << argv[1] << std::endl;
struct dense_mtx B;
B.nrow = A.ncol;
B.ncol = atoi(argv[2]);
if(B.ncol < 0)
{
free(A.row);
free(A.col);
free(A.val);
std::cerr << "Invalid argument for the number of columns of B." << std::endl;
}
B.val = (float *)malloc(sizeof(float) * B.nrow * B.ncol);
//srand((unsigned int)time(NULL));
for(int i = 0; i < B.nrow; i++)
{
for(int j = 0; j < B.ncol; j++)
{
B.val[B.ncol * i + j] = ((float)rand()/(float)(RAND_MAX)) * ((rand() % 2) ? 1.0f : -1.0f);
}
}
struct dense_mtx C1, C2;
C1.val = NULL;
C1.nrow = A.nrow;
C1.ncol = B.ncol;
C2.val = NULL;
C2.nrow = A.nrow;
C2.ncol = B.ncol;
uint64_t time_s, time_p;
std::cout << "Single Thread Computation Start" << std::endl;
uint64_t start = GetTimeStamp();
multiply_single(&A, &B, &C1);
uint64_t end = GetTimeStamp();
std::cout << "Single Thread Computation End: " << end - start << " us." << std::endl;
time_s = end - start;
int32_t *a_row;
int32_t *a_col;
float *a_val;
float *b_val;
float *c_val;
int grid_x, grid_y;
int depth;
int partial_row;
int copy_size, remaining_row;
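// Rows of C are processed in chunks of at most PARTIAL_ROW so that the device-side
// result buffer c_val only ever holds one chunk; depth is the number of chunks.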
remaining_row = C2.nrow;
partial_row = C2.nrow;
depth = 1;
if (C2.nrow > PARTIAL_ROW) {
partial_row = PARTIAL_ROW;
depth = C2.nrow / partial_row;
if (C2.nrow % partial_row)
depth++;
}
grid_x = C2.ncol / TILE_WIDTH;
if (C2.ncol % TILE_WIDTH)
grid_x++;
grid_y = partial_row / TILE_WIDTH;
if (partial_row % TILE_WIDTH)
grid_y++;
dim3 dimGrid(grid_x, grid_y, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
if (cudaErrorMemoryAllocation == cudaMalloc(&a_row, sizeof(int32_t) * (A.nrow + 1)))
fprintf(stderr, "cudaMalloc failed for a_row\n");
if (cudaErrorMemoryAllocation == cudaMalloc(&a_col, sizeof(int32_t) * A.nnze))
fprintf(stderr, "cudaMalloc failed for a_col\n");
if (cudaErrorMemoryAllocation == cudaMalloc(&a_val, sizeof(float) * A.nnze))
fprintf(stderr, "cudaMalloc failed for a_val\n");
if (cudaErrorMemoryAllocation == cudaMalloc(&b_val, sizeof(float) * B.nrow * B.ncol))
fprintf(stderr, "cudaMalloc failed for b_val\n");
if (cudaErrorMemoryAllocation == cudaMalloc(&c_val, sizeof(float) * partial_row * C2.ncol))
fprintf(stderr, "cudaMalloc failed for c_val\n");
if ((C2.val = (float *) malloc(sizeof(float) * C2.nrow * C2.ncol)) == NULL) {
fprintf(stderr, "malloc error at %d\n", __LINE__);
return 1;
}
memset(C2.val, 0, sizeof(float) * C2.nrow * C2.ncol);
std::cout << "Cuda Computation Start" << std::endl;
start = GetTimeStamp();
cudaMemcpy(a_row, A.row, sizeof(int32_t) * (A.nrow + 1), cudaMemcpyHostToDevice);
cudaMemcpy(a_col, A.col, sizeof(int32_t) * A.nnze, cudaMemcpyHostToDevice);
cudaMemcpy(a_val, A.val, sizeof(float) * A.nnze, cudaMemcpyHostToDevice);
cudaMemcpy(b_val, B.val, sizeof(float) * B.nrow * B.ncol, cudaMemcpyHostToDevice);
copy_size = sizeof(float) * partial_row * C2.ncol;
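// One kernel launch per row chunk; each chunk is copied back before c_val is reused,
// and the last copy is shrunk to the rows that actually remain.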
for (int i = 0; i < depth; i++) {
multiply_kernel<<<dimGrid, dimBlock>>>(a_row, a_col, a_val, b_val, c_val,
A.nrow, B.ncol, i, partial_row);
if (remaining_row < partial_row)
copy_size = sizeof(float) * remaining_row * C2.ncol;
cudaMemcpy(C2.val + (partial_row * i * C2.ncol), c_val,
copy_size, cudaMemcpyDeviceToHost);
remaining_row -= partial_row;
}
end = GetTimeStamp();
std::cout << "Cuda Computation End: " << end - start << " us." << std::endl << std::endl;
time_p = end - start;
printf("Speed up is %g\n", (double) time_s / time_p);
// Verify the GPU result: compare every element of C2 against the single-threaded reference C1
bool is_correct = true;
for (int i = 0; i < C1.nrow && is_correct == true; i++)
for (int j = 0; j < C1.ncol; j++)
if (!nearly_equal(C1.val[i*C1.ncol+j], C2.val[i*C2.ncol+j], 0)) {
printf("i, j: %d %d\n", i, j);
printf("%g %g\n", C1.val[i*C1.ncol+j], C2.val[i*C2.ncol+j]);
is_correct = false;
break;
}
printf("%s\n", is_correct ? "correct" : "wrong");
free(A.row);
free(A.col);
free(A.val);
free(B.val);
if(C1.val != NULL)
free(C1.val);
if(C2.val != NULL)
free(C2.val);
cudaFree(a_row);
cudaFree(a_col);
cudaFree(a_val);
cudaFree(b_val);
cudaFree(c_val);
return 0;
}
bool nearly_equal(float a, float b, float epsilon)
{
float abs_a = abs(a);
float abs_b = abs(b);
float diff = abs(a - b);
if (epsilon == 0)
epsilon = 0.00001f;
if (a == b) { // shortcut, handles infinities
return true;
} else if (a == 0 || b == 0 || diff < FLT_MIN) {
// a or b is zero or both are extremely close to it
// relative error is less meaningful here
return diff < (epsilon * FLT_MIN);
} else { // use relative error
return diff / min((abs_a + abs_b), FLT_MAX) < epsilon;
}
}
|
a9f3093c1d1cfa05823ccfd75408b3359ba6b02a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/sort.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "custring_view.cuh"
#include "regex/regex.cuh"
#include "regex/backref.h"
#include "unicode/is_flags.h"
#include "util.h"
//
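// Every routine in this file follows the same two-pass pattern: a first device pass
// computes each output string's allocation size, an exclusive scan turns the sizes into
// offsets into a single buffer, and a second pass builds the new custring_view objects
// at those offsets.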
NVStrings* NVStrings::slice_replace( const char* repl, int start, int stop )
{
if( !repl )
throw std::invalid_argument("nvstrings::slice_replace parameter cannot be null");
auto execpol = rmm::exec_policy(0);
unsigned int replen = (unsigned int)strlen(repl);
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,replen,0);
hipMemcpy(d_repl,repl,replen,hipMemcpyHostToDevice);
// compute size of output buffer
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_repl, replen, start, stop, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = 0;
if( start < dstr->chars_count() )
len = dstr->replace_size((unsigned)start,(unsigned)(stop-start),d_repl,replen);
else
{ // another odd pandas case: if out-of-bounds, just append
int bytes = dstr->size() + replen;
int nchars = dstr->chars_count() + custring_view::chars_in_string(d_repl,replen);
len = custring_view::alloc_size(bytes,nchars);
}
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_repl )
RMM_FREE(d_repl,0);
return rtn;
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the slice and replace
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_repl, replen, start, stop, d_buffer, d_offsets, d_results] __device__(size_t idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
custring_view* dout = nullptr;
if( start < dstr->chars_count() )
dout = dstr->replace((unsigned)start,(unsigned)(stop-start),d_repl,replen,buffer);
else
{ // append for pandas consistency
int bytes = dstr->size();
char* ptr = buffer;
memcpy( ptr, dstr->data(), bytes );
ptr += bytes;
memcpy( ptr, d_repl, replen );
bytes += replen;
dout = custring_view::create_from(buffer,buffer,bytes);
}
d_results[idx] = dout;
});
//
if( d_repl )
RMM_FREE(d_repl,0);
return rtn;
}
// this should replace multiple occurrences up to maxrepl
NVStrings* NVStrings::replace( const char* str, const char* repl, int maxrepl )
{
if( !str || !*str )
throw std::invalid_argument("nvstrings::replace parameter cannot be null or empty");
auto execpol = rmm::exec_policy(0);
unsigned int ssz = (unsigned int)strlen(str);
char* d_str = nullptr;
RMM_ALLOC(&d_str,ssz,0);
hipMemcpy(d_str,str,ssz,hipMemcpyHostToDevice);
unsigned int sszch = custring_view::chars_in_string(str,ssz);
if( !repl )
repl = "";
unsigned int rsz = (unsigned int)strlen(repl);
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,rsz,0);
hipMemcpy(d_repl,repl,rsz,hipMemcpyHostToDevice);
unsigned int rszch = custring_view::chars_in_string(repl,rsz);
// compute size of the output
unsigned int count = size();
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, ssz, sszch, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
if( mxn < 0 )
mxn = dstr->chars_count(); //max possible replaces for this string
unsigned int bytes = dstr->size(), nchars = dstr->chars_count();
int pos = dstr->find(d_str,ssz);
// counting bytes and chars
while((pos >= 0) && (mxn > 0))
{
bytes += rsz - ssz;
nchars += rszch - sszch;
pos = dstr->find(d_str,ssz,(unsigned)pos+sszch); // next one
--mxn;
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size);
});
//
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
{
RMM_FREE(d_str,0);
RMM_FREE(d_repl,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, ssz, sszch, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
if( mxn < 0 )
mxn = dstr->chars_count(); //max possible replaces for this string
//
char* buffer = d_buffer + d_offsets[idx];
char* sptr = dstr->data();
char* optr = buffer;
unsigned int size = dstr->size();
int pos = dstr->find(d_str,ssz), lpos=0;
while((pos >= 0) && (mxn > 0))
{ // i:bbbbsssseeee
int spos = dstr->byte_offset_for(pos); // ^
memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb
optr += spos - lpos; // ^
memcpy(optr,d_repl,rsz); // o:bbbbrrrr
optr += rsz; // ^
lpos = spos + ssz; // i:bbbbsssseeee
pos = dstr->find(d_str,ssz,pos+sszch); // ^
--mxn;
}
memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee
unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos;
d_results[idx] = custring_view::create_from(buffer,buffer,nsz);
});
//
RMM_FREE(d_str,0);
RMM_FREE(d_repl,0);
return rtn;
}
// same as above, except the target is given as a regex pattern
NVStrings* NVStrings::replace_re( const char* pattern, const char* repl, int maxrepl )
{
if( !pattern || !*pattern )
throw std::invalid_argument("nvstrings::replace_re parameter cannot be null or empty");
unsigned int count = size();
if( count==0 )
return new NVStrings(count);
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
//
// copy replace string to device memory
if( !repl )
repl = "";
unsigned int rsz = (unsigned int)strlen(repl);
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,rsz,0);
hipMemcpy(d_repl,repl,rsz,hipMemcpyHostToDevice);
unsigned int rszch = custring_view::chars_in_string(repl,rsz);
// compute size of the output
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
if( mxn < 0 )
mxn = dstr->chars_count(); //max possible replaces for this string
unsigned int bytes = dstr->size(), nchars = dstr->chars_count();
int begin = 0, end = (int)nchars;
int result = prog->find(idx,dstr,begin,end);
while((result > 0) && (mxn > 0))
{
bytes += rsz - (dstr->byte_offset_for(end)-dstr->byte_offset_for(begin));
nchars += rszch - (end-begin);
begin = end;
end = (int)nchars;
result = prog->find(idx,dstr,begin,end); // next one
--mxn;
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size);
});
//
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
{
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the replace
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
int nchars = (int)dstr->chars_count();
if( mxn < 0 )
mxn = nchars; //max possible replaces for this string
char* buffer = d_buffer + d_offsets[idx]; // output buffer
char* sptr = dstr->data(); // input buffer
char* optr = buffer; // running output pointer
unsigned int size = dstr->size(); // number of bytes in the input string
int lpos = 0, begin = 0, end = nchars; // working vars
// copy input to output replacing strings as we go
int result = prog->find(idx,dstr,begin,end);
while((result > 0) && (mxn > 0))
{ // i:bbbbsssseeee
int spos = dstr->byte_offset_for(begin); // ^
memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb
optr += spos - lpos; // ^
memcpy(optr,d_repl,rsz); // o:bbbbrrrr
optr += rsz; // ^
lpos = dstr->byte_offset_for(end); // i:bbbbsssseeee
begin = end; // ^
end = nchars;
result = prog->find(idx,dstr,begin,end);
--mxn;
} // copy the rest:
memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee
unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos;
d_results[idx] = custring_view::create_from(buffer,buffer,nsz);
});
//
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn;
}
// unlike the routines above, the replacement template may contain backreferences (\1, \2, ...) to regex capture groups
NVStrings* NVStrings::replace_with_backrefs( const char* pattern, const char* repl )
{
if( !pattern || !*pattern )
throw std::invalid_argument("nvstrings::replace_with_backrefs parameter cannot be null or empty");
unsigned int count = size();
if( count==0 || repl==0 )
return new NVStrings(count); // returns all nulls
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
//
// parse the repl string for backref indicators
std::vector<thrust::pair<int,int> > brefs;
std::string srepl = parse_backrefs(repl,brefs);
unsigned int rsz = (unsigned int)srepl.size();
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,rsz,0);
hipMemcpy(d_repl,srepl.c_str(),rsz,hipMemcpyHostToDevice);
unsigned int rszch = custring_view::chars_in_string(srepl.c_str(),rsz);
rmm::device_vector<thrust::pair<int,int> > dbrefs(brefs);
auto d_brefs = dbrefs.data().get();
unsigned int refcount = (unsigned int)dbrefs.size();
// if refcount != prog->group_counts() -- probably should throw exception
// compute size of the output
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, rsz, rszch, d_brefs, refcount, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int bytes = dstr->size(), nchars = dstr->chars_count();
int begin = 0, end = (int)nchars;
while( prog->find(idx,dstr,begin,end) > 0 )
{
nchars += rszch - (end-begin);
bytes += rsz - (dstr->byte_offset_for(end)-dstr->byte_offset_for(begin));
for( unsigned int j=0; j < refcount; ++j ) // eval each ref
{
int refidx = d_brefs[j].first; // backref indicator
int spos=begin, epos=end; // modified by extract
if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || (epos <= spos) )
continue; // no value for this ref
nchars += epos - spos; // add up chars
spos = dstr->byte_offset_for(spos); // convert to bytes
bytes += dstr->byte_offset_for(epos) - spos; // add up bytes
}
begin = end;
end = (int)dstr->chars_count();
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size); // new size for this string
});
//
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
{
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the replace
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_repl, rsz, d_offsets, d_brefs, refcount, d_buffer, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr ) // abcd-efgh X\1+\2Z
return; // nulls create nulls // ([a-z])-([a-z]) ==> abcXd+eZfgh
char* buffer = d_buffer + d_offsets[idx]; // output buffer
char* optr = buffer; // running output pointer
char* sptr = dstr->data(); // abcd-efgh
int nchars = (int)dstr->chars_count(); // ^
int lpos = 0, begin = 0, end = (int)nchars;
// insert extracted strings left-to-right
while( prog->find(idx,dstr,begin,end) > 0 )
{
// we have found the section that needs to be replaced
int left = dstr->byte_offset_for(begin)-lpos;
memcpy( optr, sptr, left ); // abc________
optr += left; // ^
int ilpos = 0; // last end pos of replace template
char* rptr = d_repl; // running ptr for replace template // X+Z
for( unsigned int j=0; j < refcount; ++j ) // eval each ref // 1st loop 2nd loop
{ // ------------ --------------
int refidx = d_brefs[j].first; // backref number // X+Z X+Z
int ipos = d_brefs[j].second; // insert position // ^ ^
int len = ipos - ilpos; // bytes to copy from input
copy_and_incr_both(optr,rptr,len); // abcX_______ abcXd+_______
ilpos += len; // update last-position
int spos=begin, epos=end; // these are modified by extract
if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || // d e
(epos <= spos) )
continue; // no value for this ref
spos = dstr->byte_offset_for(spos); // convert to bytes
int bytes = dstr->byte_offset_for(epos) - spos;
copy_and_incr(optr,dstr->data()+spos,bytes); // abcXd______ abcXd+e______
}
if( rptr < d_repl+rsz ) // copy remainder of template // abcXd+eZ___
copy_and_incr(optr,rptr,(unsigned int)(d_repl-rptr) + rsz);
lpos = dstr->byte_offset_for(end);
sptr = dstr->data() + lpos; // abcd-efgh
begin = end; // ^
end = (int)dstr->chars_count();
}
if( sptr < dstr->data()+dstr->size() ) // abcXd+eZfgh
copy_and_incr(optr,sptr,(unsigned int)(dstr->data()-sptr) + dstr->size());
unsigned int nsz = (unsigned int)(optr - buffer); // compute output size
d_results[idx] = custring_view::create_from(buffer,buffer,nsz); // new string
});
//
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn;
}
//
NVStrings* NVStrings::translate( std::pair<unsigned,unsigned>* utable, unsigned int tableSize )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
// convert unicode table into utf8 table
thrust::host_vector< thrust::pair<Char,Char> > htable(tableSize);
for( unsigned int idx=0; idx < tableSize; ++idx )
{
htable[idx].first = u2u8(utable[idx].first);
htable[idx].second = u2u8(utable[idx].second);
}
// could sort on the device; this table should not be very big
thrust::sort(thrust::host, htable.begin(), htable.end(),
[] __host__ (thrust::pair<Char,Char> p1, thrust::pair<Char,Char> p2) { return p1.first > p2.first; });
// copy translate table to device memory
rmm::device_vector< thrust::pair<Char,Char> > table(htable);
thrust::pair<Char,Char>* d_table = table.data().get();
// compute size of each new string
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
int tsize = tableSize;
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_table, tsize, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
const char* sptr = dstr->data();
unsigned int bytes = dstr->size();
unsigned int nchars = dstr->chars_count();
for( unsigned int i=0; i < nchars; ++i )
{
Char ch = dstr->at(i);
Char nch = ch;
for( int t=0; t < tsize; ++t ) // replace with faster lookup
nch = ( ch==d_table[t].first ? d_table[t].second : nch );
int bic = custring_view::bytes_in_char(ch);
int nbic = (nch ? custring_view::bytes_in_char(nch) : 0);
bytes += nbic - bic;
if( nch==0 )
--nchars;
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size);
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the translate
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_buffer, d_offsets, d_table, tsize, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
const char* sptr = dstr->data();
unsigned int nchars = dstr->chars_count();
char* optr = buffer;
int nsz = 0;
for( unsigned int i=0; i < nchars; ++i )
{
Char ch = 0;
unsigned int cw = custring_view::char_to_Char(sptr,ch);
Char nch = ch;
for( int t=0; t < tsize; ++t ) // replace with faster lookup
nch = ( ch==d_table[t].first ? d_table[t].second : nch );
sptr += cw;
if( nch==0 )
continue;
unsigned int nbic = custring_view::Char_to_char(nch,optr);
optr += nbic;
nsz += nbic;
}
d_results[idx] = custring_view::create_from(buffer,buffer,nsz);
});
//
return rtn;
}
//
// This will create a new instance replacing any nulls with the provided string.
// The parameter can be an empty string or any other string but not null.
NVStrings* NVStrings::fillna( const char* str )
{
if( str==0 )
throw std::invalid_argument("nvstrings::fillna parameter cannot be null");
auto execpol = rmm::exec_policy(0);
unsigned int ssz = (unsigned int)strlen(str);
unsigned int asz = custring_view::alloc_size(str,ssz);
char* d_str = nullptr;
RMM_ALLOC(&d_str,ssz+1,0);
hipMemcpy(d_str,str,ssz+1,hipMemcpyHostToDevice);
// compute size of the output
unsigned int count = size();
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, asz, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
unsigned int size = asz;
if( dstr )
size = dstr->alloc_size();
d_sizes[idx] = ALIGN_SIZE(size);
});
//
NVStrings* rtn = new NVStrings(count); // create output object
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
rmm::device_vector<size_t> offsets(count,0); // create offsets
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, ssz, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
char* buffer = d_buffer + d_offsets[idx];
if( dstr )
dstr = custring_view::create_from(buffer,*dstr);
else
dstr = custring_view::create_from(buffer,d_str,ssz);
d_results[idx] = dstr;
});
//
RMM_FREE(d_str,0);
return rtn;
}
|
a9f3093c1d1cfa05823ccfd75408b3359ba6b02a.cu
|
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/sort.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "custring_view.cuh"
#include "regex/regex.cuh"
#include "regex/backref.h"
#include "unicode/is_flags.h"
#include "util.h"
//
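// Every routine in this file follows the same two-pass pattern: a first device pass
// computes each output string's allocation size, an exclusive scan turns the sizes into
// offsets into a single buffer, and a second pass builds the new custring_view objects
// at those offsets.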
NVStrings* NVStrings::slice_replace( const char* repl, int start, int stop )
{
if( !repl )
throw std::invalid_argument("nvstrings::slice_replace parameter cannot be null");
auto execpol = rmm::exec_policy(0);
unsigned int replen = (unsigned int)strlen(repl);
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,replen,0);
cudaMemcpy(d_repl,repl,replen,cudaMemcpyHostToDevice);
// compute size of output buffer
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_repl, replen, start, stop, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = 0;
if( start < dstr->chars_count() )
len = dstr->replace_size((unsigned)start,(unsigned)(stop-start),d_repl,replen);
else
{ // another odd pandas case: if out-of-bounds, just append
int bytes = dstr->size() + replen;
int nchars = dstr->chars_count() + custring_view::chars_in_string(d_repl,replen);
len = custring_view::alloc_size(bytes,nchars);
}
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_repl )
RMM_FREE(d_repl,0);
return rtn;
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the slice and replace
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_repl, replen, start, stop, d_buffer, d_offsets, d_results] __device__(size_t idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
custring_view* dout = nullptr;
if( start < dstr->chars_count() )
dout = dstr->replace((unsigned)start,(unsigned)(stop-start),d_repl,replen,buffer);
else
{ // append for pandas consistency
int bytes = dstr->size();
char* ptr = buffer;
memcpy( ptr, dstr->data(), bytes );
ptr += bytes;
memcpy( ptr, d_repl, replen );
bytes += replen;
dout = custring_view::create_from(buffer,buffer,bytes);
}
d_results[idx] = dout;
});
//
if( d_repl )
RMM_FREE(d_repl,0);
return rtn;
}
// this should replace multiple occurrences up to maxrepl
NVStrings* NVStrings::replace( const char* str, const char* repl, int maxrepl )
{
if( !str || !*str )
throw std::invalid_argument("nvstrings::replace parameter cannot be null or empty");
auto execpol = rmm::exec_policy(0);
unsigned int ssz = (unsigned int)strlen(str);
char* d_str = nullptr;
RMM_ALLOC(&d_str,ssz,0);
cudaMemcpy(d_str,str,ssz,cudaMemcpyHostToDevice);
unsigned int sszch = custring_view::chars_in_string(str,ssz);
if( !repl )
repl = "";
unsigned int rsz = (unsigned int)strlen(repl);
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,rsz,0);
cudaMemcpy(d_repl,repl,rsz,cudaMemcpyHostToDevice);
unsigned int rszch = custring_view::chars_in_string(repl,rsz);
// compute size of the output
unsigned int count = size();
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, ssz, sszch, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
if( mxn < 0 )
mxn = dstr->chars_count(); //max possible replaces for this string
unsigned int bytes = dstr->size(), nchars = dstr->chars_count();
int pos = dstr->find(d_str,ssz);
// counting bytes and chars
while((pos >= 0) && (mxn > 0))
{
bytes += rsz - ssz;
nchars += rszch - sszch;
pos = dstr->find(d_str,ssz,(unsigned)pos+sszch); // next one
--mxn;
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size);
});
//
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
{
RMM_FREE(d_str,0);
RMM_FREE(d_repl,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, ssz, sszch, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
if( mxn < 0 )
mxn = dstr->chars_count(); //max possible replaces for this string
//
char* buffer = d_buffer + d_offsets[idx];
char* sptr = dstr->data();
char* optr = buffer;
unsigned int size = dstr->size();
int pos = dstr->find(d_str,ssz), lpos=0;
while((pos >= 0) && (mxn > 0))
{ // i:bbbbsssseeee
int spos = dstr->byte_offset_for(pos); // ^
memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb
optr += spos - lpos; // ^
memcpy(optr,d_repl,rsz); // o:bbbbrrrr
optr += rsz; // ^
lpos = spos + ssz; // i:bbbbsssseeee
pos = dstr->find(d_str,ssz,pos+sszch); // ^
--mxn;
}
memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee
unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos;
d_results[idx] = custring_view::create_from(buffer,buffer,nsz);
});
//
RMM_FREE(d_str,0);
RMM_FREE(d_repl,0);
return rtn;
}
// same as above, except the target is given as a regex pattern
NVStrings* NVStrings::replace_re( const char* pattern, const char* repl, int maxrepl )
{
if( !pattern || !*pattern )
throw std::invalid_argument("nvstrings::replace_re parameter cannot be null or empty");
unsigned int count = size();
if( count==0 )
return new NVStrings(count);
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
//
// copy replace string to device memory
if( !repl )
repl = "";
unsigned int rsz = (unsigned int)strlen(repl);
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,rsz,0);
cudaMemcpy(d_repl,repl,rsz,cudaMemcpyHostToDevice);
unsigned int rszch = custring_view::chars_in_string(repl,rsz);
// compute size of the output
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_repl, rsz, rszch, maxrepl, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
if( mxn < 0 )
mxn = dstr->chars_count(); //max possible replaces for this string
unsigned int bytes = dstr->size(), nchars = dstr->chars_count();
int begin = 0, end = (int)nchars;
int result = prog->find(idx,dstr,begin,end);
while((result > 0) && (mxn > 0))
{
bytes += rsz - (dstr->byte_offset_for(end)-dstr->byte_offset_for(begin));
nchars += rszch - (end-begin);
begin = end;
end = (int)nchars;
result = prog->find(idx,dstr,begin,end); // next one
--mxn;
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size);
});
//
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
{
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the replace
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_repl, rsz, d_buffer, d_offsets, maxrepl, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int mxn = maxrepl;
int nchars = (int)dstr->chars_count();
if( mxn < 0 )
mxn = nchars; //max possible replaces for this string
char* buffer = d_buffer + d_offsets[idx]; // output buffer
char* sptr = dstr->data(); // input buffer
char* optr = buffer; // running output pointer
unsigned int size = dstr->size(); // number of bytes in the input string
int lpos = 0, begin = 0, end = nchars; // working vars
// copy input to output replacing strings as we go
int result = prog->find(idx,dstr,begin,end);
while((result > 0) && (mxn > 0))
{ // i:bbbbsssseeee
int spos = dstr->byte_offset_for(begin); // ^
memcpy(optr,sptr+lpos,spos-lpos); // o:bbbb
optr += spos - lpos; // ^
memcpy(optr,d_repl,rsz); // o:bbbbrrrr
optr += rsz; // ^
lpos = dstr->byte_offset_for(end); // i:bbbbsssseeee
begin = end; // ^
end = nchars;
result = prog->find(idx,dstr,begin,end);
--mxn;
} // copy the rest:
memcpy(optr,sptr+lpos,size-lpos); // o:bbbbrrrreeee
unsigned int nsz = (unsigned int)(optr - buffer) + size - lpos;
d_results[idx] = custring_view::create_from(buffer,buffer,nsz);
});
//
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn;
}
// unlike the routines above, the replacement template may contain backreferences (\1, \2, ...) to regex capture groups
NVStrings* NVStrings::replace_with_backrefs( const char* pattern, const char* repl )
{
if( !pattern || !*pattern )
throw std::invalid_argument("nvstrings::replace_with_backrefs parameter cannot be null or empty");
unsigned int count = size();
if( count==0 || repl==0 )
return new NVStrings(count); // returns all nulls
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags(),count);
delete ptn32;
//
// parse the repl string for backref indicators
std::vector<thrust::pair<int,int> > brefs;
std::string srepl = parse_backrefs(repl,brefs);
unsigned int rsz = (unsigned int)srepl.size();
char* d_repl = nullptr;
RMM_ALLOC(&d_repl,rsz,0);
cudaMemcpy(d_repl,srepl.c_str(),rsz,cudaMemcpyHostToDevice);
unsigned int rszch = custring_view::chars_in_string(srepl.c_str(),rsz);
rmm::device_vector<thrust::pair<int,int> > dbrefs(brefs);
auto d_brefs = dbrefs.data().get();
unsigned int refcount = (unsigned int)dbrefs.size();
// if refcount != prog->group_counts() -- probably should throw exception
// compute size of the output
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, rsz, rszch, d_brefs, refcount, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int bytes = dstr->size(), nchars = dstr->chars_count();
int begin = 0, end = (int)nchars;
while( prog->find(idx,dstr,begin,end) > 0 )
{
nchars += rszch - (end-begin);
bytes += rsz - (dstr->byte_offset_for(end)-dstr->byte_offset_for(begin));
for( unsigned int j=0; j < refcount; ++j ) // eval each ref
{
int refidx = d_brefs[j].first; // backref indicator
int spos=begin, epos=end; // modified by extract
if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || (epos <= spos) )
continue; // no value for this ref
nchars += epos - spos; // add up chars
spos = dstr->byte_offset_for(spos); // convert to bytes
bytes += dstr->byte_offset_for(epos) - spos; // add up bytes
}
begin = end;
end = (int)dstr->chars_count();
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size); // new size for this string
});
//
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
{
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the replace
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[prog, d_strings, d_repl, rsz, d_offsets, d_brefs, refcount, d_buffer, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr ) // abcd-efgh X\1+\2Z
return; // nulls create nulls // ([a-z])-([a-z]) ==> abcXd+eZfgh
char* buffer = d_buffer + d_offsets[idx]; // output buffer
char* optr = buffer; // running output pointer
char* sptr = dstr->data(); // abcd-efgh
int nchars = (int)dstr->chars_count(); // ^
int lpos = 0, begin = 0, end = (int)nchars;
// insert extracted strings left-to-right
while( prog->find(idx,dstr,begin,end) > 0 )
{
// we have found the section that needs to be replaced
int left = dstr->byte_offset_for(begin)-lpos;
memcpy( optr, sptr, left ); // abc________
optr += left; // ^
int ilpos = 0; // last end pos of replace template
char* rptr = d_repl; // running ptr for replace template // X+Z
for( unsigned int j=0; j < refcount; ++j ) // eval each ref // 1st loop 2nd loop
{ // ------------ --------------
int refidx = d_brefs[j].first; // backref number // X+Z X+Z
int ipos = d_brefs[j].second; // insert position // ^ ^
int len = ipos - ilpos; // bytes to copy from input
copy_and_incr_both(optr,rptr,len); // abcX_______ abcXd+_______
ilpos += len; // update last-position
int spos=begin, epos=end; // these are modified by extract
if( (prog->extract(idx,dstr,spos,epos,refidx-1)<=0) || // d e
(epos <= spos) )
continue; // no value for this ref
spos = dstr->byte_offset_for(spos); // convert to bytes
int bytes = dstr->byte_offset_for(epos) - spos;
copy_and_incr(optr,dstr->data()+spos,bytes); // abcXd______ abcXd+e______
}
if( rptr < d_repl+rsz ) // copy remainder of template // abcXd+eZ___
copy_and_incr(optr,rptr,(unsigned int)(d_repl-rptr) + rsz);
lpos = dstr->byte_offset_for(end);
sptr = dstr->data() + lpos; // abcd-efgh
begin = end; // ^
end = (int)dstr->chars_count();
}
if( sptr < dstr->data()+dstr->size() ) // abcXd+eZfgh
copy_and_incr(optr,sptr,(unsigned int)(dstr->data()-sptr) + dstr->size());
unsigned int nsz = (unsigned int)(optr - buffer); // compute output size
d_results[idx] = custring_view::create_from(buffer,buffer,nsz); // new string
});
//
dreprog::destroy(prog);
RMM_FREE(d_repl,0);
return rtn;
}
//
NVStrings* NVStrings::translate( std::pair<unsigned,unsigned>* utable, unsigned int tableSize )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
// convert unicode table into utf8 table
thrust::host_vector< thrust::pair<Char,Char> > htable(tableSize);
for( unsigned int idx=0; idx < tableSize; ++idx )
{
htable[idx].first = u2u8(utable[idx].first);
htable[idx].second = u2u8(utable[idx].second);
}
// could sort on the device; this table should not be very big
thrust::sort(thrust::host, htable.begin(), htable.end(),
[] __host__ (thrust::pair<Char,Char> p1, thrust::pair<Char,Char> p2) { return p1.first > p2.first; });
// copy translate table to device memory
rmm::device_vector< thrust::pair<Char,Char> > table(htable);
thrust::pair<Char,Char>* d_table = table.data().get();
// compute size of each new string
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
int tsize = tableSize;
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_table, tsize, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
const char* sptr = dstr->data();
unsigned int bytes = dstr->size();
unsigned int nchars = dstr->chars_count();
for( unsigned int i=0; i < nchars; ++i )
{
Char ch = dstr->at(i);
Char nch = ch;
for( int t=0; t < tsize; ++t ) // replace with faster lookup
nch = ( ch==d_table[t].first ? d_table[t].second : nch );
int bic = custring_view::bytes_in_char(ch);
int nbic = (nch ? custring_view::bytes_in_char(nch) : 0);
bytes += nbic - bic;
if( nch==0 )
--nchars;
}
unsigned int size = custring_view::alloc_size(bytes,nchars);
d_sizes[idx] = ALIGN_SIZE(size);
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the translate
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_buffer, d_offsets, d_table, tsize, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
const char* sptr = dstr->data();
unsigned int nchars = dstr->chars_count();
char* optr = buffer;
int nsz = 0;
for( unsigned int i=0; i < nchars; ++i )
{
Char ch = 0;
unsigned int cw = custring_view::char_to_Char(sptr,ch);
Char nch = ch;
for( int t=0; t < tsize; ++t ) // replace with faster lookup
nch = ( ch==d_table[t].first ? d_table[t].second : nch );
sptr += cw;
if( nch==0 )
continue;
unsigned int nbic = custring_view::Char_to_char(nch,optr);
optr += nbic;
nsz += nbic;
}
d_results[idx] = custring_view::create_from(buffer,buffer,nsz);
});
//
return rtn;
}
//
// This will create a new instance replacing any nulls with the provided string.
// The parameter can be an empty string or any other string but not null.
NVStrings* NVStrings::fillna( const char* str )
{
if( str==0 )
throw std::invalid_argument("nvstrings::fillna parameter cannot be null");
auto execpol = rmm::exec_policy(0);
unsigned int ssz = (unsigned int)strlen(str);
unsigned int asz = custring_view::alloc_size(str,ssz);
char* d_str = nullptr;
RMM_ALLOC(&d_str,ssz+1,0);
cudaMemcpy(d_str,str,ssz+1,cudaMemcpyHostToDevice);
// compute size of the output
unsigned int count = size();
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, asz, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
unsigned int size = asz;
if( dstr )
size = dstr->alloc_size();
d_sizes[idx] = ALIGN_SIZE(size);
});
//
NVStrings* rtn = new NVStrings(count); // create output object
char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes);
rmm::device_vector<size_t> offsets(count,0); // create offsets
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_str, ssz, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
char* buffer = d_buffer + d_offsets[idx];
if( dstr )
dstr = custring_view::create_from(buffer,*dstr);
else
dstr = custring_view::create_from(buffer,d_str,ssz);
d_results[idx] = dstr;
});
//
RMM_FREE(d_str,0);
return rtn;
}
|
e7304c365e4bf55844295d7b7d6fc7edf24face8.hip
|
// !!! This is a file automatically generated by hipify!!!
// Compile into a shared library for ctypes (required for "mandelbrot_ctypes.py")
// Windows: "nvcc -shared -o mandelbrot.dll mandelbrot.cu"
// Linux: "nvcc -Xcompiler -fPIC -shared -o mandelbrot.so mandelbrot.cu"
// Compile into a PTX binary (required for "mandelbrot_ptx.py" and "mandelbrot_driver.py")
// For both Windows and Linux: "nvcc -ptx -o mandelbrot.ptx mandelbrot.cu"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
extern "C" __global__ void mandelbrot_ker(float * lattice, float * mandelbrot_graph, int max_iters, float upper_bound_squared, int lattice_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < lattice_size*lattice_size )
{
int i = tid % lattice_size;
int j = lattice_size - 1 - (tid / lattice_size);
float c_re = lattice[i];
float c_im = lattice[j];
float z_re = 0.0f;
float z_im = 0.0f;
mandelbrot_graph[tid] = 1;
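// Assume the point stays bounded; the escape-time loop below clears the flag
// as soon as |z|^2 exceeds the squared bound.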
for (int k = 0; k < max_iters; k++)
{
float temp;
temp = z_re*z_re - z_im*z_im + c_re;
z_im = 2*z_re*z_im + c_im;
z_re = temp;
if ( (z_re*z_re + z_im*z_im) > upper_bound_squared )
{
mandelbrot_graph[tid] = 0;
break;
}
}
}
return;
}
// Linux users: remove "__declspec(dllexport)" from the line below.
extern "C" __declspec(dllexport) void launch_mandelbrot(float * lattice, float * mandelbrot_graph, int max_iters, float upper_bound, int lattice_size)
{
int num_bytes_lattice = sizeof(float) * lattice_size;
int num_bytes_graph = sizeof(float)* lattice_size*lattice_size;
float * d_lattice;
float * d_mandelbrot_graph;
hipMalloc((float **) &d_lattice, num_bytes_lattice);
hipMalloc((float **) &d_mandelbrot_graph, num_bytes_graph);
hipMemcpy(d_lattice, lattice, num_bytes_lattice, hipMemcpyHostToDevice);
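// Launch enough 32-thread blocks to cover all lattice_size * lattice_size points;
// the kernel guards against the overshoot.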
int grid_size = (int) ceil( ( (double) lattice_size*lattice_size ) / ( (double) 32 ) );
hipLaunchKernelGGL(( mandelbrot_ker) , dim3(grid_size), dim3(32) , 0, 0, d_lattice, d_mandelbrot_graph, max_iters, upper_bound*upper_bound, lattice_size);
hipMemcpy(mandelbrot_graph, d_mandelbrot_graph, num_bytes_graph, hipMemcpyDeviceToHost);
hipFree(d_lattice);
hipFree(d_mandelbrot_graph);
}
|
e7304c365e4bf55844295d7b7d6fc7edf24face8.cu
|
// Compile into a shared library for ctypes (required for "mandelbrot_ctypes.py")
// Windows: "nvcc -shared -o mandelbrot.dll mandelbrot.cu"
// Linux: "nvcc -Xcompiler -fPIC -shared -o mandelbrot.so mandelbrot.cu"
// Compile into a PTX binary (required for "mandelbrot_ptx.py" and "mandelbrot_driver.py")
// For both Windows and Linux: "nvcc -ptx -o mandelbrot.ptx mandelbrot.cu"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
extern "C" __global__ void mandelbrot_ker(float * lattice, float * mandelbrot_graph, int max_iters, float upper_bound_squared, int lattice_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < lattice_size*lattice_size )
{
int i = tid % lattice_size;
int j = lattice_size - 1 - (tid / lattice_size);
float c_re = lattice[i];
float c_im = lattice[j];
float z_re = 0.0f;
float z_im = 0.0f;
mandelbrot_graph[tid] = 1;
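// Assume the point stays bounded; the escape-time loop below clears the flag
// as soon as |z|^2 exceeds the squared bound.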
for (int k = 0; k < max_iters; k++)
{
float temp;
temp = z_re*z_re - z_im*z_im + c_re;
z_im = 2*z_re*z_im + c_im;
z_re = temp;
if ( (z_re*z_re + z_im*z_im) > upper_bound_squared )
{
mandelbrot_graph[tid] = 0;
break;
}
}
}
return;
}
// Linux users: remove "__declspec(dllexport)" from the line below.
extern "C" __declspec(dllexport) void launch_mandelbrot(float * lattice, float * mandelbrot_graph, int max_iters, float upper_bound, int lattice_size)
{
int num_bytes_lattice = sizeof(float) * lattice_size;
int num_bytes_graph = sizeof(float)* lattice_size*lattice_size;
float * d_lattice;
float * d_mandelbrot_graph;
cudaMalloc((float **) &d_lattice, num_bytes_lattice);
cudaMalloc((float **) &d_mandelbrot_graph, num_bytes_graph);
cudaMemcpy(d_lattice, lattice, num_bytes_lattice, cudaMemcpyHostToDevice);
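// Launch enough 32-thread blocks to cover all lattice_size * lattice_size points;
// the kernel guards against the overshoot.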
int grid_size = (int) ceil( ( (double) lattice_size*lattice_size ) / ( (double) 32 ) );
mandelbrot_ker <<< grid_size, 32 >>> (d_lattice, d_mandelbrot_graph, max_iters, upper_bound*upper_bound, lattice_size);
cudaMemcpy(mandelbrot_graph, d_mandelbrot_graph, num_bytes_graph, cudaMemcpyDeviceToHost);
cudaFree(d_lattice);
cudaFree(d_mandelbrot_graph);
}
|
4e273a487a35e091142fda288948da00114b57c6.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @file density_overflow_cuda_thread_map_kernel.cu
* @author Yibo Lin
* @date Jun 2018
* @brief Compute density map on CUDA with cell2bin parallelization
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeDensityMapWithThreadMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int* thread2node_map, const int* thread2bin_x_map, const int* thread2bin_y_map,
const int num_threads,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor)
{
// rank-one update density map
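// Each work item pairs one cell (thread2node_map) with one candidate bin offset
// (thread2bin_x_map / thread2bin_y_map); its overlap area with that bin is added to
// the shared density map with atomicAdd.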
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_threads; i += blockDim.x * gridDim.x)
{
// density overflow function
auto computeDensityOverflowFunc = [](T x, T node_size, T bin_center, T bin_size){
return max(T(0.0), min(x+node_size, bin_center+bin_size/2) - max(x, bin_center-bin_size/2));
};
int node_id = thread2node_map[i];
int bin_offset_x = thread2bin_x_map[i];
int bin_offset_y = thread2bin_y_map[i];
// x direction
int bin_index_xl = int((x_tensor[node_id]-xl)/bin_size_x);
bin_index_xl = max(bin_index_xl, 0);
int k = bin_index_xl+bin_offset_x;
if (k+1 > num_bins_x)
{
continue;
}
// y direction
int bin_index_yl = int((y_tensor[node_id]-yl)/bin_size_y);
bin_index_yl = max(bin_index_yl, 0);
int h = bin_index_yl+bin_offset_y;
if (h+1 > num_bins_y)
{
continue;
}
T px = computeDensityOverflowFunc(x_tensor[node_id], node_size_x_tensor[node_id], bin_center_x_tensor[k], bin_size_x);
T py = computeDensityOverflowFunc(y_tensor[node_id], node_size_y_tensor[node_id], bin_center_y_tensor[h], bin_size_y);
// still area
atomicAdd(&density_map_tensor[k*num_bins_y+h], px*py);
__syncthreads();
}
}
template <typename T>
int computeDensityOverflowMapCudaThreadMapLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int* thread2node_map, const int* thread2bin_x_map, const int* thread2bin_y_map,
const int num_threads,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int block_count = 32;
int thread_count = 1024;
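// Note: 32 blocks x 1024 threads = 32768 threads in flight; the grid-stride loop inside
// the kernel lets this fixed launch size cover any num_threads work items.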
hipLaunchKernelGGL(( computeDensityMapWithThreadMap), dim3(block_count), dim3(thread_count), 0, 0,
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
bin_center_x_tensor, bin_center_y_tensor,
thread2node_map, thread2bin_x_map, thread2bin_y_map,
num_threads,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
density_map_tensor);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeDensityOverflowMapLauncher(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int* thread2node_map, const int* thread2bin_x_map, const int* thread2bin_y_map, \
const int num_threads, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
T* density_map_tensor\
)\
{ \
return computeDensityOverflowMapCudaThreadMapLauncher(\
x_tensor, y_tensor, \
node_size_x_tensor, node_size_y_tensor, \
bin_center_x_tensor, bin_center_y_tensor, \
thread2node_map, thread2bin_x_map, thread2bin_y_map, \
num_threads, \
num_nodes, \
num_bins_x, num_bins_y, \
xl, yl, xh, yh, \
bin_size_x, bin_size_y, \
density_map_tensor\
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
4e273a487a35e091142fda288948da00114b57c6.cu
|
/**
* @file density_overflow_cuda_thread_map_kernel.cu
* @author Yibo Lin
* @date Jun 2018
* @brief Compute density map on CUDA with cell2bin parallelization
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeDensityMapWithThreadMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int* thread2node_map, const int* thread2bin_x_map, const int* thread2bin_y_map,
const int num_threads,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor)
{
// rank-one update density map
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_threads; i += blockDim.x * gridDim.x)
{
// density overflow function
auto computeDensityOverflowFunc = [](T x, T node_size, T bin_center, T bin_size){
return max(T(0.0), min(x+node_size, bin_center+bin_size/2) - max(x, bin_center-bin_size/2));
};
int node_id = thread2node_map[i];
int bin_offset_x = thread2bin_x_map[i];
int bin_offset_y = thread2bin_y_map[i];
// x direction
int bin_index_xl = int((x_tensor[node_id]-xl)/bin_size_x);
bin_index_xl = max(bin_index_xl, 0);
int k = bin_index_xl+bin_offset_x;
if (k+1 > num_bins_x)
{
continue;
}
// y direction
int bin_index_yl = int((y_tensor[node_id]-yl)/bin_size_y);
bin_index_yl = max(bin_index_yl, 0);
int h = bin_index_yl+bin_offset_y;
if (h+1 > num_bins_y)
{
continue;
}
T px = computeDensityOverflowFunc(x_tensor[node_id], node_size_x_tensor[node_id], bin_center_x_tensor[k], bin_size_x);
T py = computeDensityOverflowFunc(y_tensor[node_id], node_size_y_tensor[node_id], bin_center_y_tensor[h], bin_size_y);
// still area
atomicAdd(&density_map_tensor[k*num_bins_y+h], px*py);
// no __syncthreads() is needed here: the atomicAdd above already serializes concurrent updates to the bin
}
}
template <typename T>
int computeDensityOverflowMapCudaThreadMapLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int* thread2node_map, const int* thread2bin_x_map, const int* thread2bin_y_map,
const int num_threads,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int block_count = 32;
int thread_count = 1024;
computeDensityMapWithThreadMap<<<block_count, thread_count>>>(
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
bin_center_x_tensor, bin_center_y_tensor,
thread2node_map, thread2bin_x_map, thread2bin_y_map,
num_threads,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
density_map_tensor);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeDensityOverflowMapLauncher(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int* thread2node_map, const int* thread2bin_x_map, const int* thread2bin_y_map, \
const int num_threads, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
T* density_map_tensor\
)\
{ \
return computeDensityOverflowMapCudaThreadMapLauncher(\
x_tensor, y_tensor, \
node_size_x_tensor, node_size_y_tensor, \
bin_center_x_tensor, bin_center_y_tensor, \
thread2node_map, thread2bin_x_map, thread2bin_y_map, \
num_threads, \
num_nodes, \
num_bins_x, num_bins_y, \
xl, yl, xh, yh, \
bin_size_x, bin_size_y, \
density_map_tensor\
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
0464aec77b58f18bdf7f188dbf4860de3a6ea59e.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 4096 * 4
#define NY 4096 * 4
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A, DATA_TYPE *x_gpu, DATA_TYPE *A_gpu)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
x_gpu[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
A_gpu[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A_gpu, DATA_TYPE* x_gpu, DATA_TYPE* y_gpu, DATA_TYPE* tmp_gpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
t_start = rtclock();
hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block) , 0, 0, A_gpu,x_gpu,tmp_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), dim3(block) , 0, 0, A_gpu,y_gpu,tmp_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* tmp;
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
// DATA_TYPE* tmp;
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
hipMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMallocManaged(&x_gpu, sizeof(DATA_TYPE) * NY);
hipMallocManaged(&y_gpu, sizeof(DATA_TYPE) * NY);
hipMallocManaged(&tmp_gpu, sizeof(DATA_TYPE) * NX);
init_array(x, A, x_gpu, A_gpu);
GPU_argv_init();
ataxGpu(A_gpu, x_gpu, y_gpu, tmp_gpu);
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_gpu);
free(A);
free(x);
free(y);
free(tmp);
hipFree(A_gpu);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(tmp_gpu);
return 0;
}
|
0464aec77b58f18bdf7f188dbf4860de3a6ea59e.cu
|
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 4096 * 4
#define NY 4096 * 4
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A, DATA_TYPE *x_gpu, DATA_TYPE *A_gpu)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
x_gpu[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
A_gpu[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A_gpu, DATA_TYPE* x_gpu, DATA_TYPE* y_gpu, DATA_TYPE* tmp_gpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
t_start = rtclock();
atax_kernel1<<< grid1, block >>>(A_gpu,x_gpu,tmp_gpu);
cudaDeviceSynchronize();
atax_kernel2<<< grid2, block >>>(A_gpu,y_gpu,tmp_gpu);
cudaDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* tmp;
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
// DATA_TYPE* tmp;
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMallocManaged(&x_gpu, sizeof(DATA_TYPE) * NY);
cudaMallocManaged(&y_gpu, sizeof(DATA_TYPE) * NY);
cudaMallocManaged(&tmp_gpu, sizeof(DATA_TYPE) * NX);
init_array(x, A, x_gpu, A_gpu);
GPU_argv_init();
ataxGpu(A_gpu, x_gpu, y_gpu, tmp_gpu);
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_gpu);
free(A);
free(x);
free(y);
free(tmp);
cudaFree(A_gpu);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(tmp_gpu);
return 0;
}
|
9fb8f05c22bdf026c48ddd2a4cbe0a38ec1c6bc6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* File: MD5_GPU.cu
* Authors: Stefano Charissis, Jin-Long Lee and Peter Wong
*
* Description:
* This is the CUDA code. It consists of a kernel, some device functions and helper functions.
* The MD5-specific code (e.g. most of the #defines) is taken from Ronald Rivest's official MD5 implementation.
*/
#include "MD5_GPU.h"
#include <cmath>
#include <cstdlib>
#include <string>
#include <iostream>
#include <utility>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "Utility.h"
using namespace std;
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
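// For example, ROTATE_LEFT(0x80000001, 1) yields 0x00000003: the left shift gives
// 0x00000002 and the 31-bit right shift wraps the dropped high bit back around.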
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
// The MD5 digest to match.
#define NUM_DIGEST_SIZE 4
__device__ __constant__ uint d_targetDigest[NUM_DIGEST_SIZE];
// Target character set.
#define NUM_POWER_SYMBOLS 96
__device__ __constant__ uchar d_powerSymbols[NUM_POWER_SYMBOLS];
// Power values used for permutation of new messages.
#define NUM_POWER_VALUES 16
__constant__ float d_powerValues[NUM_POWER_VALUES];
__global__ void doMD5(float*, float, size_t, size_t, float*, uchar*);
// char to uchar conversion
uchar c2c (char c){
return (uchar)((c > '9') ? (c - 'a' + 10) : (c - '0'));
}
void initialiseGPU(string targetDigest, string targetCharset) {
// Reverse target endianness
uint h_targetDigest[4];
for (int c=0;c<targetDigest.size();c+=8) {
uint x = c2c(targetDigest[c]) <<4 | c2c(targetDigest[c+1]);
uint y = c2c(targetDigest[c+2]) << 4 | c2c(targetDigest[c+3]);
uint z = c2c(targetDigest[c+4]) << 4 | c2c(targetDigest[c+5]);
uint w = c2c(targetDigest[c+6]) << 4 | c2c(targetDigest[c+7]);
h_targetDigest[c/8] = w << 24 | z << 16 | y << 8 | x;
}
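// Worked example (digest value chosen for illustration): for the hex prefix "d41d8cd9"
// the byte pairs give x = 0xd4, y = 0x1d, z = 0x8c, w = 0xd9, so the packed word is
// 0xd98c1dd4, i.e. the little-endian layout the kernel compares against.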
// Copy target digest from host to GPU.
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_targetDigest, h_targetDigest,
NUM_DIGEST_SIZE * sizeof(uint), 0, hipMemcpyHostToDevice));
// Copy target character set from host to GPU.
uchar h_powerSymbols[NUM_POWER_SYMBOLS];
for (size_t i = 0; i != targetCharset.length(); ++i)
h_powerSymbols[i] = targetCharset[i];
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_powerSymbols, h_powerSymbols,
NUM_POWER_SYMBOLS * sizeof(uchar)));
// Copy power values used for permutation from host to GPU.
float charsetLen = targetCharset.length();
float h_powerValues[NUM_POWER_VALUES];
for (size_t i = 0; i != NUM_POWER_VALUES; ++i)
h_powerValues[i] = pow(charsetLen, (float)(NUM_POWER_VALUES - i - 1));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_powerValues, h_powerValues,
NUM_POWER_VALUES * sizeof(float)));
}
pair<bool, string> findMessage(size_t min, size_t max, size_t charsetLength) {
bool isFound = false;
string message;
// BlockSize and ThreadPerBlock Configuration
// Optimal value pairs are system-dependent
// Experimentation is necessary
int nBlocks = 8; //8
int nThreadsPerBlock = 256; //256
//8 x 256 seems best on my system (256MB 8600GT, E6600)
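// With these defaults the launch runs nBlocks * nThreadsPerBlock = 2048 threads, so each
// thread enumerates roughly charsetLength^size / 2048 candidate messages per kernel call
// (the ceil() of that ratio is exactly what nIterations computes below).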
// messageNumber; used to indicate a result
float* d_messageNumber;
float h_messageNumber = -1;
//Copy messageNumber from host to device
CUDA_SAFE_CALL(hipMalloc((void**)&d_messageNumber, sizeof(float)));
CUDA_SAFE_CALL(hipMemcpy(d_messageNumber, &h_messageNumber,
sizeof(float), hipMemcpyHostToDevice));
float* d_startNumbers;
CUDA_SAFE_CALL(hipMalloc((void**)&d_startNumbers, (nBlocks * nThreadsPerBlock) * sizeof(float)));
float h_startNumbers[(nBlocks * nThreadsPerBlock)];
uchar* d_message;
CUDA_SAFE_CALL(hipMalloc((void**)&d_message, 16 * sizeof(uchar)));
uchar h_message[16];
for (size_t size = min; size <= max; ++size) {
float maxValue = pow((float)charsetLength, (float)size);
float nIterations = ceil(maxValue / (nBlocks * nThreadsPerBlock));
for (size_t i = 0; i != (nBlocks * nThreadsPerBlock); ++i) {
h_startNumbers[i] = i * nIterations;
}
CUDA_SAFE_CALL(hipMemcpy(d_startNumbers, h_startNumbers,
(nBlocks * nThreadsPerBlock) * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( doMD5), dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, d_startNumbers, nIterations, charsetLength, size, d_messageNumber, d_message);
hipDeviceSynchronize();
cout << size << endl;
printf("%s\n", hipGetErrorString(hipGetLastError()));
CUDA_SAFE_CALL(hipMemcpy(&h_messageNumber, d_messageNumber,
sizeof(float), hipMemcpyDeviceToHost));
if (h_messageNumber != -1) {
printf("%f\n", h_messageNumber);
CUDA_SAFE_CALL(hipMemcpy(h_message, d_message,
16 * sizeof(uchar), hipMemcpyDeviceToHost));
string message;
for (size_t i = 0; i != size; ++i)
message.push_back(h_message[i]);
cout << message << endl;
break;
}
}
CUDA_SAFE_CALL(hipFree(d_startNumbers));
CUDA_SAFE_CALL(hipFree(d_message));
return make_pair(isFound, message);
}
__global__ void doMD5(float* d_startNumbers, float nIterations, size_t charsetLength, size_t size, float* d_messageNumber, uchar* message) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
float maxValue = powf(__uint2float_rz(charsetLength), __uint2float_rz(size));
uint in[17];
// Zero out the chunk to hash.
for (size_t i = 0; i != 17; ++i)
in[i] = 0x00000000;
// Put the message length in bits.
in[14] = size << 3;
uchar* toHashAsChar = (uchar*)in;
// Pads the string to the required length.
for (size_t i = 0; i != size; ++i)
toHashAsChar[i] = d_powerSymbols[0];
// Put the 1 bit (as per MD5 spec)
toHashAsChar[size] = 0x80;
float numberToConvert = d_startNumbers[idx];
size_t toHashAsCharIndices[17];
if (numberToConvert < maxValue) {
for (size_t i = 0; i != size; ++i) {
toHashAsCharIndices[i] = __float2uint_rz(floorf(numberToConvert / d_powerValues[NUM_POWER_VALUES - size + i]));
numberToConvert = floorf(fmodf(numberToConvert, d_powerValues[NUM_POWER_VALUES - size + i]));
}
// #pragma unroll is a compiler-based optimisation; loops are unrolled
#pragma unroll 3
for (float iterationsDone = 0; iterationsDone != nIterations; ++iterationsDone) {
if (*d_messageNumber == 1)
break;
for (size_t i = 0; i != size; ++i)
toHashAsChar[i] = d_powerSymbols[toHashAsCharIndices[i]];
uint h0 = 0x67452301;
uint h1 = 0xEFCDAB89;
uint h2 = 0x98BADCFE;
uint h3 = 0x10325476;
uint a = h0;
uint b = h1;
uint c = h2;
uint d = h3;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF ( a, b, c, d, in[ 0], S11, 3614090360); /* 1 */
FF ( d, a, b, c, in[ 1], S12, 3905402710); /* 2 */
FF ( c, d, a, b, in[ 2], S13, 606105819); /* 3 */
FF ( b, c, d, a, in[ 3], S14, 3250441966); /* 4 */
FF ( a, b, c, d, in[ 4], S11, 4118548399); /* 5 */
FF ( d, a, b, c, in[ 5], S12, 1200080426); /* 6 */
FF ( c, d, a, b, in[ 6], S13, 2821735955); /* 7 */
FF ( b, c, d, a, in[ 7], S14, 4249261313); /* 8 */
FF ( a, b, c, d, in[ 8], S11, 1770035416); /* 9 */
FF ( d, a, b, c, in[ 9], S12, 2336552879); /* 10 */
FF ( c, d, a, b, in[10], S13, 4294925233); /* 11 */
FF ( b, c, d, a, in[11], S14, 2304563134); /* 12 */
FF ( a, b, c, d, in[12], S11, 1804603682); /* 13 */
FF ( d, a, b, c, in[13], S12, 4254626195); /* 14 */
FF ( c, d, a, b, in[14], S13, 2792965006); /* 15 */
FF ( b, c, d, a, in[15], S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG ( a, b, c, d, in[ 1], S21, 4129170786); /* 17 */
GG ( d, a, b, c, in[ 6], S22, 3225465664); /* 18 */
GG ( c, d, a, b, in[11], S23, 643717713); /* 19 */
GG ( b, c, d, a, in[ 0], S24, 3921069994); /* 20 */
GG ( a, b, c, d, in[ 5], S21, 3593408605); /* 21 */
GG ( d, a, b, c, in[10], S22, 38016083); /* 22 */
GG ( c, d, a, b, in[15], S23, 3634488961); /* 23 */
GG ( b, c, d, a, in[ 4], S24, 3889429448); /* 24 */
GG ( a, b, c, d, in[ 9], S21, 568446438); /* 25 */
GG ( d, a, b, c, in[14], S22, 3275163606); /* 26 */
GG ( c, d, a, b, in[ 3], S23, 4107603335); /* 27 */
GG ( b, c, d, a, in[ 8], S24, 1163531501); /* 28 */
GG ( a, b, c, d, in[13], S21, 2850285829); /* 29 */
GG ( d, a, b, c, in[ 2], S22, 4243563512); /* 30 */
GG ( c, d, a, b, in[ 7], S23, 1735328473); /* 31 */
GG ( b, c, d, a, in[12], S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH ( a, b, c, d, in[ 5], S31, 4294588738); /* 33 */
HH ( d, a, b, c, in[ 8], S32, 2272392833); /* 34 */
HH ( c, d, a, b, in[11], S33, 1839030562); /* 35 */
HH ( b, c, d, a, in[14], S34, 4259657740); /* 36 */
HH ( a, b, c, d, in[ 1], S31, 2763975236); /* 37 */
HH ( d, a, b, c, in[ 4], S32, 1272893353); /* 38 */
HH ( c, d, a, b, in[ 7], S33, 4139469664); /* 39 */
HH ( b, c, d, a, in[10], S34, 3200236656); /* 40 */
HH ( a, b, c, d, in[13], S31, 681279174); /* 41 */
HH ( d, a, b, c, in[ 0], S32, 3936430074); /* 42 */
HH ( c, d, a, b, in[ 3], S33, 3572445317); /* 43 */
HH ( b, c, d, a, in[ 6], S34, 76029189); /* 44 */
HH ( a, b, c, d, in[ 9], S31, 3654602809); /* 45 */
HH ( d, a, b, c, in[12], S32, 3873151461); /* 46 */
HH ( c, d, a, b, in[15], S33, 530742520); /* 47 */
HH ( b, c, d, a, in[ 2], S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II ( a, b, c, d, in[ 0], S41, 4096336452); /* 49 */
II ( d, a, b, c, in[ 7], S42, 1126891415); /* 50 */
II ( c, d, a, b, in[14], S43, 2878612391); /* 51 */
II ( b, c, d, a, in[ 5], S44, 4237533241); /* 52 */
II ( a, b, c, d, in[12], S41, 1700485571); /* 53 */
II ( d, a, b, c, in[ 3], S42, 2399980690); /* 54 */
II ( c, d, a, b, in[10], S43, 4293915773); /* 55 */
II ( b, c, d, a, in[ 1], S44, 2240044497); /* 56 */
II ( a, b, c, d, in[ 8], S41, 1873313359); /* 57 */
II ( d, a, b, c, in[15], S42, 4264355552); /* 58 */
II ( c, d, a, b, in[ 6], S43, 2734768916); /* 59 */
II ( b, c, d, a, in[13], S44, 1309151649); /* 60 */
II ( a, b, c, d, in[ 4], S41, 4149444226); /* 61 */
II ( d, a, b, c, in[11], S42, 3174756917); /* 62 */
II ( c, d, a, b, in[ 2], S43, 718787259); /* 63 */
II ( b, c, d, a, in[ 9], S44, 3951481745); /* 64 */
a += h0;
b += h1;
c += h2;
d += h3;
// Check if this hash is the target hash
if (a == d_targetDigest[0] && b == d_targetDigest[1] && c == d_targetDigest[2] && d == d_targetDigest[3]){
*d_messageNumber = 1;
for (size_t i = 0; i != size; ++i)
message[i] = toHashAsChar[i];
}
// If it's not the hash we're after, create the next permutation/key/message
else {
size_t i = size - 1;
bool incrementNext = true;
while (incrementNext) {
if (toHashAsCharIndices[i] < (charsetLength - 1)) {
++toHashAsCharIndices[i];
incrementNext = false;
}
else {
if (toHashAsCharIndices[i] >= charsetLength) {
*d_messageNumber = 3;
}
toHashAsCharIndices[i] = 0;
if (i == 0) {
incrementNext = false;
}
else {
--i;
}
}
}
}
}
}
}
|
9fb8f05c22bdf026c48ddd2a4cbe0a38ec1c6bc6.cu
|
/*
* File: MD5_GPU.cu
* Authors: Stefano Charissis, Jin-Long Lee and Peter Wong
*
* Description:
* This is the CUDA code. It consists of a kernel, some device functions and helper functions.
* The MD5-specific code (e.g. most of the #defines) is taken from Ronald Rivest's official MD5 implementation.
*/
#include "MD5_GPU.h"
#include <cmath>
#include <cstdlib>
#include <string>
#include <iostream>
#include <utility>
#include <cuda.h>
#include <cutil.h>
#include "Utility.h"
using namespace std;
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
// The MD5 digest to match.
#define NUM_DIGEST_SIZE 4
__device__ __constant__ uint d_targetDigest[NUM_DIGEST_SIZE];
// Target character set.
#define NUM_POWER_SYMBOLS 96
__device__ __constant__ uchar d_powerSymbols[NUM_POWER_SYMBOLS];
// Power values used for permutation of new messages.
#define NUM_POWER_VALUES 16
__constant__ float d_powerValues[NUM_POWER_VALUES];
__global__ void doMD5(float*, float, size_t, size_t, float*, uchar*);
// char to uchar conversion
uchar c2c (char c){
return (uchar)((c > '9') ? (c - 'a' + 10) : (c - '0'));
}
void initialiseGPU(string targetDigest, string targetCharset) {
// Reverse target endianness
uint h_targetDigest[4];
for (int c=0;c<targetDigest.size();c+=8) {
uint x = c2c(targetDigest[c]) <<4 | c2c(targetDigest[c+1]);
uint y = c2c(targetDigest[c+2]) << 4 | c2c(targetDigest[c+3]);
uint z = c2c(targetDigest[c+4]) << 4 | c2c(targetDigest[c+5]);
uint w = c2c(targetDigest[c+6]) << 4 | c2c(targetDigest[c+7]);
h_targetDigest[c/8] = w << 24 | z << 16 | y << 8 | x;
}
// Copy target digest from host to GPU.
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_targetDigest, h_targetDigest,
NUM_DIGEST_SIZE * sizeof(uint), 0, cudaMemcpyHostToDevice));
// Copy target character set from host to GPU.
uchar h_powerSymbols[NUM_POWER_SYMBOLS];
for (size_t i = 0; i != targetCharset.length(); ++i)
h_powerSymbols[i] = targetCharset[i];
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_powerSymbols, h_powerSymbols,
NUM_POWER_SYMBOLS * sizeof(uchar)));
// Copy power values used for permutation from host to GPU.
float charsetLen = targetCharset.length();
float h_powerValues[NUM_POWER_VALUES];
for (size_t i = 0; i != NUM_POWER_VALUES; ++i)
h_powerValues[i] = pow(charsetLen, (float)(NUM_POWER_VALUES - i - 1));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_powerValues, h_powerValues,
NUM_POWER_VALUES * sizeof(float)));
}
pair<bool, string> findMessage(size_t min, size_t max, size_t charsetLength) {
bool isFound = false;
string message;
// BlockSize and ThreadPerBlock Configuration
// Optimal value pairs are system-dependent
// Experimentation is necessary
int nBlocks = 8; //8
int nThreadsPerBlock = 256; //256
//8 x 256 seems best on my system (256MB 8600GT, E6600)
// messageNumber; used to indicate a result
float* d_messageNumber;
float h_messageNumber = -1;
//Copy messageNumber from host to device
CUDA_SAFE_CALL(cudaMalloc((void**)&d_messageNumber, sizeof(float)));
CUDA_SAFE_CALL(cudaMemcpy(d_messageNumber, &h_messageNumber,
sizeof(float), cudaMemcpyHostToDevice));
float* d_startNumbers;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_startNumbers, (nBlocks * nThreadsPerBlock) * sizeof(float)));
float h_startNumbers[(nBlocks * nThreadsPerBlock)];
uchar* d_message;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_message, 16 * sizeof(uchar)));
uchar h_message[16];
for (size_t size = min; size <= max; ++size) {
float maxValue = pow((float)charsetLength, (float)size);
float nIterations = ceil(maxValue / (nBlocks * nThreadsPerBlock));
for (size_t i = 0; i != (nBlocks * nThreadsPerBlock); ++i) {
h_startNumbers[i] = i * nIterations;
}
CUDA_SAFE_CALL(cudaMemcpy(d_startNumbers, h_startNumbers,
(nBlocks * nThreadsPerBlock) * sizeof(float), cudaMemcpyHostToDevice));
doMD5<<< nBlocks, nThreadsPerBlock >>>(d_startNumbers, nIterations, charsetLength, size, d_messageNumber, d_message);
cudaThreadSynchronize();
cout << size << endl;
printf("%s\n", cudaGetErrorString(cudaGetLastError()));
CUDA_SAFE_CALL(cudaMemcpy(&h_messageNumber, d_messageNumber,
sizeof(float), cudaMemcpyDeviceToHost));
if (h_messageNumber != -1) {
printf("%f\n", h_messageNumber);
CUDA_SAFE_CALL(cudaMemcpy(h_message, d_message,
16 * sizeof(uchar), cudaMemcpyDeviceToHost));
string message;
for (size_t i = 0; i != size; ++i)
message.push_back(h_message[i]);
cout << message << endl;
break;
}
}
CUDA_SAFE_CALL(cudaFree(d_startNumbers));
CUDA_SAFE_CALL(cudaFree(d_message));
return make_pair(isFound, message);
}
__global__ void doMD5(float* d_startNumbers, float nIterations, size_t charsetLength, size_t size, float* d_messageNumber, uchar* message) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
float maxValue = powf(__uint2float_rz(charsetLength), __uint2float_rz(size));
uint in[17];
// Zero out the chunk to hash.
for (size_t i = 0; i != 17; ++i)
in[i] = 0x00000000;
// Put the message length in bits.
in[14] = size << 3;
uchar* toHashAsChar = (uchar*)in;
// Pads the string to the required length.
for (size_t i = 0; i != size; ++i)
toHashAsChar[i] = d_powerSymbols[0];
// Put the 1 bit (as per MD5 spec)
toHashAsChar[size] = 0x80;
float numberToConvert = d_startNumbers[idx];
size_t toHashAsCharIndices[17];
if (numberToConvert < maxValue) {
for (size_t i = 0; i != size; ++i) {
toHashAsCharIndices[i] = __float2uint_rz(floorf(numberToConvert / d_powerValues[NUM_POWER_VALUES - size + i]));
numberToConvert = floorf(fmodf(numberToConvert, d_powerValues[NUM_POWER_VALUES - size + i]));
}
// #pragma unroll is a compiler-based optimisation; loops are unrolled
#pragma unroll 3
for (float iterationsDone = 0; iterationsDone != nIterations; ++iterationsDone) {
if (*d_messageNumber == 1)
break;
for (size_t i = 0; i != size; ++i)
toHashAsChar[i] = d_powerSymbols[toHashAsCharIndices[i]];
uint h0 = 0x67452301;
uint h1 = 0xEFCDAB89;
uint h2 = 0x98BADCFE;
uint h3 = 0x10325476;
uint a = h0;
uint b = h1;
uint c = h2;
uint d = h3;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF ( a, b, c, d, in[ 0], S11, 3614090360); /* 1 */
FF ( d, a, b, c, in[ 1], S12, 3905402710); /* 2 */
FF ( c, d, a, b, in[ 2], S13, 606105819); /* 3 */
FF ( b, c, d, a, in[ 3], S14, 3250441966); /* 4 */
FF ( a, b, c, d, in[ 4], S11, 4118548399); /* 5 */
FF ( d, a, b, c, in[ 5], S12, 1200080426); /* 6 */
FF ( c, d, a, b, in[ 6], S13, 2821735955); /* 7 */
FF ( b, c, d, a, in[ 7], S14, 4249261313); /* 8 */
FF ( a, b, c, d, in[ 8], S11, 1770035416); /* 9 */
FF ( d, a, b, c, in[ 9], S12, 2336552879); /* 10 */
FF ( c, d, a, b, in[10], S13, 4294925233); /* 11 */
FF ( b, c, d, a, in[11], S14, 2304563134); /* 12 */
FF ( a, b, c, d, in[12], S11, 1804603682); /* 13 */
FF ( d, a, b, c, in[13], S12, 4254626195); /* 14 */
FF ( c, d, a, b, in[14], S13, 2792965006); /* 15 */
FF ( b, c, d, a, in[15], S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG ( a, b, c, d, in[ 1], S21, 4129170786); /* 17 */
GG ( d, a, b, c, in[ 6], S22, 3225465664); /* 18 */
GG ( c, d, a, b, in[11], S23, 643717713); /* 19 */
GG ( b, c, d, a, in[ 0], S24, 3921069994); /* 20 */
GG ( a, b, c, d, in[ 5], S21, 3593408605); /* 21 */
GG ( d, a, b, c, in[10], S22, 38016083); /* 22 */
GG ( c, d, a, b, in[15], S23, 3634488961); /* 23 */
GG ( b, c, d, a, in[ 4], S24, 3889429448); /* 24 */
GG ( a, b, c, d, in[ 9], S21, 568446438); /* 25 */
GG ( d, a, b, c, in[14], S22, 3275163606); /* 26 */
GG ( c, d, a, b, in[ 3], S23, 4107603335); /* 27 */
GG ( b, c, d, a, in[ 8], S24, 1163531501); /* 28 */
GG ( a, b, c, d, in[13], S21, 2850285829); /* 29 */
GG ( d, a, b, c, in[ 2], S22, 4243563512); /* 30 */
GG ( c, d, a, b, in[ 7], S23, 1735328473); /* 31 */
GG ( b, c, d, a, in[12], S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH ( a, b, c, d, in[ 5], S31, 4294588738); /* 33 */
HH ( d, a, b, c, in[ 8], S32, 2272392833); /* 34 */
HH ( c, d, a, b, in[11], S33, 1839030562); /* 35 */
HH ( b, c, d, a, in[14], S34, 4259657740); /* 36 */
HH ( a, b, c, d, in[ 1], S31, 2763975236); /* 37 */
HH ( d, a, b, c, in[ 4], S32, 1272893353); /* 38 */
HH ( c, d, a, b, in[ 7], S33, 4139469664); /* 39 */
HH ( b, c, d, a, in[10], S34, 3200236656); /* 40 */
HH ( a, b, c, d, in[13], S31, 681279174); /* 41 */
HH ( d, a, b, c, in[ 0], S32, 3936430074); /* 42 */
HH ( c, d, a, b, in[ 3], S33, 3572445317); /* 43 */
HH ( b, c, d, a, in[ 6], S34, 76029189); /* 44 */
HH ( a, b, c, d, in[ 9], S31, 3654602809); /* 45 */
HH ( d, a, b, c, in[12], S32, 3873151461); /* 46 */
HH ( c, d, a, b, in[15], S33, 530742520); /* 47 */
HH ( b, c, d, a, in[ 2], S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II ( a, b, c, d, in[ 0], S41, 4096336452); /* 49 */
II ( d, a, b, c, in[ 7], S42, 1126891415); /* 50 */
II ( c, d, a, b, in[14], S43, 2878612391); /* 51 */
II ( b, c, d, a, in[ 5], S44, 4237533241); /* 52 */
II ( a, b, c, d, in[12], S41, 1700485571); /* 53 */
II ( d, a, b, c, in[ 3], S42, 2399980690); /* 54 */
II ( c, d, a, b, in[10], S43, 4293915773); /* 55 */
II ( b, c, d, a, in[ 1], S44, 2240044497); /* 56 */
II ( a, b, c, d, in[ 8], S41, 1873313359); /* 57 */
II ( d, a, b, c, in[15], S42, 4264355552); /* 58 */
II ( c, d, a, b, in[ 6], S43, 2734768916); /* 59 */
II ( b, c, d, a, in[13], S44, 1309151649); /* 60 */
II ( a, b, c, d, in[ 4], S41, 4149444226); /* 61 */
II ( d, a, b, c, in[11], S42, 3174756917); /* 62 */
II ( c, d, a, b, in[ 2], S43, 718787259); /* 63 */
II ( b, c, d, a, in[ 9], S44, 3951481745); /* 64 */
a += h0;
b += h1;
c += h2;
d += h3;
// Check if this hash is the target hash
if (a == d_targetDigest[0] && b == d_targetDigest[1] && c == d_targetDigest[2] && d == d_targetDigest[3]){
*d_messageNumber = 1;
for (size_t i = 0; i != size; ++i)
message[i] = toHashAsChar[i];
}
// If it's not the hash we're after, create the next permutation/key/message
else {
size_t i = size - 1;
bool incrementNext = true;
while (incrementNext) {
if (toHashAsCharIndices[i] < (charsetLength - 1)) {
++toHashAsCharIndices[i];
incrementNext = false;
}
else {
if (toHashAsCharIndices[i] >= charsetLength) {
*d_messageNumber = 3;
}
toHashAsCharIndices[i] = 0;
if (i == 0) {
incrementNext = false;
}
else {
--i;
}
}
}
}
}
}
}
|
97878fb1faee3587569030822cb5282e816c5870.hip
|
// !!! This is a file automatically generated by hipify!!!
/*********************************************************************
* Filename: aes_test.c
* Author: Brad Conte (brad AT bradconte.com)
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Performs known-answer tests on the corresponding AES
implementation. These tests do not encompass the full
range of available test vectors and are not sufficient
for FIPS-140 certification. However, if the tests pass
it is very, very likely that the code is correct and was
compiled properly. This code also serves as
example usage of the functions.
*********************************************************************/
/*************************** HEADER FILES ***************************/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <sys/stat.h>
#include "aes.h"
#include <hip/hip_runtime.h>
/*********************** FUNCTION DEFINITIONS ***********************/
void print_hex(BYTE str[], int len)
{
int idx;
for(idx = 0; idx < len; idx++)
printf("%02x", str[idx]);
}
/*int aes_ecb_test()
{
WORD key_schedule[60], idx;
BYTE enc_buf[128];
BYTE plaintext[2][16] = {
{0x6b,0xc1,0xbe,0xe2,0x2e,0x40,0x9f,0x96,0xe9,0x3d,0x7e,0x11,0x73,0x93,0x17,0x2a},
{0xae,0x2d,0x8a,0x57,0x1e,0x03,0xac,0x9c,0x9e,0xb7,0x6f,0xac,0x45,0xaf,0x8e,0x51}
};
BYTE ciphertext[2][16] = {
{0xf3,0xee,0xd1,0xbd,0xb5,0xd2,0xa0,0x3c,0x06,0x4b,0x5a,0x7e,0x3d,0xb1,0x81,0xf8},
{0x59,0x1c,0xcb,0x10,0xd4,0x10,0xed,0x26,0xdc,0x5b,0xa7,0x4a,0x31,0x36,0x28,0x70}
};
BYTE key[1][32] = {
{0x60,0x3d,0xeb,0x10,0x15,0xca,0x71,0xbe,0x2b,0x73,0xae,0xf0,0x85,0x7d,0x77,0x81,0x1f,0x35,0x2c,0x07,0x3b,0x61,0x08,0xd7,0x2d,0x98,0x10,0xa3,0x09,0x14,0xdf,0xf4}
};
int pass = 1;
// Raw ECB mode.
//printf("* ECB mode:\n");
aes_key_setup(key[0], key_schedule, 256);
//printf( "Key : ");
//print_hex(key[0], 32);
for(idx = 0; idx < 2; idx++) {
aes_encrypt(plaintext[idx], enc_buf, key_schedule, 256);
//printf("\nPlaintext : ");
//print_hex(plaintext[idx], 16);
//printf("\n-encrypted to: ");
//print_hex(enc_buf, 16);
pass = pass && !memcmp(enc_buf, ciphertext[idx], 16);
aes_decrypt(ciphertext[idx], enc_buf, key_schedule, 256);
//printf("\nCiphertext : ");
//print_hex(ciphertext[idx], 16);
//printf("\n-decrypted to: ");
//print_hex(enc_buf, 16);
pass = pass && !memcmp(enc_buf, plaintext[idx], 16);
//printf("\n\n");
}
return(pass);
}*/
int aes_test()
{
int pass = 1;
//pass = pass && aes_ecb_test();
//pass = pass && aes_cbc_test();
//pass = pass && aes_ctr_test();
//pass = pass && aes_ccm_test();
return(pass);
}
void enc_dec_file(char *filename)
{
/*********************** OPENING AND READING THE INPUT FILE ***********************/
BYTE *data;
BYTE *encrypted_data;
BYTE *decrypted_data;
//char *filename = "../../sample_files/hubble_1.tif";
WORD key_schedule[60];
BYTE key[1][32] = {
{0x60,0x3d,0xeb,0x10,0x15,0xca,0x71,0xbe,0x2b,0x73,0xae,0xf0,0x85,0x7d,0x77,0x81,0x1f,0x35,0x2c,0x07,0x3b,0x61,0x08,0xd7,0x2d,0x98,0x10,0xa3,0x09,0x14,0xdf,0xf4}
};
struct stat st; // stat holds information about the file
size_t data_size_bytes = 0;
if (stat(filename, &st) == 0){ // if stat() succeeds on the input file
data_size_bytes = sizeof(BYTE) * st.st_size;
data = (BYTE *) malloc(data_size_bytes); // reserve memory the size of the input file for the data pointer
};
FILE *file = fopen(filename, "rb"); // open the file in binary ("rb") mode
// copy the image into the data array
if(data != NULL && file){ // proceed only if the data buffer was allocated and the file was opened
int current_byte = 0; // current byte index
// copy the file into the data array byte by byte
while(fread(&data[current_byte], sizeof(BYTE), 1, file) == 1){
current_byte += 1; // advance to the next byte
};
};
encrypted_data = (BYTE *) malloc(data_size_bytes); // reserve memory for the encrypted data
decrypted_data = (BYTE *) malloc(data_size_bytes); // likewise for the decrypted data
BYTE *d_data; // pointer to the input data on the device
BYTE *d_encrypted_data; // pointer to the encrypted data on the device
BYTE *d_decrypted_data; // pointer to the decrypted data on the device
// allocate device memory
hipMalloc((void **)&d_data, data_size_bytes);
hipMalloc((void **)&d_encrypted_data, data_size_bytes);
hipMalloc((void **)&d_decrypted_data, data_size_bytes);
hipMemcpy(d_data, data, data_size_bytes, hipMemcpyHostToDevice); // copy the data array to the device
WORD *d_schedule;
hipMalloc((void **)&d_schedule, sizeof(key_schedule));
int threadsPerBlock = 256;
int blocksPerGrid =((data_size_bytes + threadsPerBlock - 1) / threadsPerBlock)/AES_BLOCK_SIZE;
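// Rough sizing example (assuming AES_BLOCK_SIZE is 16, as in the usual aes.h): for a
// 1 MiB input, (1048576 + 255) / 256 = 4096, divided by 16 gives 256 blocks of 256
// threads, i.e. 65536 threads -- one per 16-byte AES block.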
aes_key_setup(key[0], key_schedule, 256);
hipMemcpy(d_schedule, key_schedule, sizeof(key_schedule), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( aes_encrypt), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_data, d_encrypted_data, d_schedule, 256, data_size_bytes); // encrypt the buffer and store the result in d_encrypted_data
hipLaunchKernelGGL(( aes_decrypt), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_encrypted_data, d_decrypted_data, d_schedule, 256, data_size_bytes); // decrypt d_encrypted_data and store the result in d_decrypted_data
hipMemcpy(encrypted_data, d_encrypted_data, data_size_bytes, hipMemcpyDeviceToHost);
hipMemcpy(decrypted_data, d_decrypted_data, data_size_bytes, hipMemcpyDeviceToHost);
// create the output files for the encrypted and decrypted data
FILE *enc_file = fopen("file.enc", "wb+");
FILE *dec_file = fopen("file.dec", "wb+");
// write the data to the corresponding files
fwrite(encrypted_data, sizeof(BYTE) * st.st_size, 1, enc_file);
fwrite(decrypted_data, sizeof(BYTE) * st.st_size, 1, dec_file);
// close the files
fclose(enc_file);
fclose(dec_file);
};
int main(int argc, char *argv[])
{
if (argc == 2) {
enc_dec_file(argv[1]);
}
/*
printf("AES Tests: %s\n", aes_test() ? "SUCCEEDED" : "FAILED");
*/
return(0);
}
|
97878fb1faee3587569030822cb5282e816c5870.cu
|
/*********************************************************************
* Filename: aes_test.c
* Author: Brad Conte (brad AT bradconte.com)
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Performs known-answer tests on the corresponding AES
implementation. These tests do not encompass the full
range of available test vectors and are not sufficient
for FIPS-140 certification. However, if the tests pass
it is very, very likely that the code is correct and was
compiled properly. This code also serves as
example usage of the functions.
*********************************************************************/
/*************************** HEADER FILES ***************************/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <sys/stat.h>
#include "aes.h"
#include <cuda_runtime.h>
/*********************** FUNCTION DEFINITIONS ***********************/
void print_hex(BYTE str[], int len)
{
int idx;
for(idx = 0; idx < len; idx++)
printf("%02x", str[idx]);
}
/*int aes_ecb_test()
{
WORD key_schedule[60], idx;
BYTE enc_buf[128];
BYTE plaintext[2][16] = {
{0x6b,0xc1,0xbe,0xe2,0x2e,0x40,0x9f,0x96,0xe9,0x3d,0x7e,0x11,0x73,0x93,0x17,0x2a},
{0xae,0x2d,0x8a,0x57,0x1e,0x03,0xac,0x9c,0x9e,0xb7,0x6f,0xac,0x45,0xaf,0x8e,0x51}
};
BYTE ciphertext[2][16] = {
{0xf3,0xee,0xd1,0xbd,0xb5,0xd2,0xa0,0x3c,0x06,0x4b,0x5a,0x7e,0x3d,0xb1,0x81,0xf8},
{0x59,0x1c,0xcb,0x10,0xd4,0x10,0xed,0x26,0xdc,0x5b,0xa7,0x4a,0x31,0x36,0x28,0x70}
};
BYTE key[1][32] = {
{0x60,0x3d,0xeb,0x10,0x15,0xca,0x71,0xbe,0x2b,0x73,0xae,0xf0,0x85,0x7d,0x77,0x81,0x1f,0x35,0x2c,0x07,0x3b,0x61,0x08,0xd7,0x2d,0x98,0x10,0xa3,0x09,0x14,0xdf,0xf4}
};
int pass = 1;
// Raw ECB mode.
//printf("* ECB mode:\n");
aes_key_setup(key[0], key_schedule, 256);
//printf( "Key : ");
//print_hex(key[0], 32);
for(idx = 0; idx < 2; idx++) {
aes_encrypt(plaintext[idx], enc_buf, key_schedule, 256);
//printf("\nPlaintext : ");
//print_hex(plaintext[idx], 16);
//printf("\n-encrypted to: ");
//print_hex(enc_buf, 16);
pass = pass && !memcmp(enc_buf, ciphertext[idx], 16);
aes_decrypt(ciphertext[idx], enc_buf, key_schedule, 256);
//printf("\nCiphertext : ");
//print_hex(ciphertext[idx], 16);
//printf("\n-decrypted to: ");
//print_hex(enc_buf, 16);
pass = pass && !memcmp(enc_buf, plaintext[idx], 16);
//printf("\n\n");
}
return(pass);
}*/
int aes_test()
{
int pass = 1;
//pass = pass && aes_ecb_test();
//pass = pass && aes_cbc_test();
//pass = pass && aes_ctr_test();
//pass = pass && aes_ccm_test();
return(pass);
}
void enc_dec_file(char *filename)
{
/*********************** OPENING AND READING THE INPUT FILE ***********************/
BYTE *data;
BYTE *encrypted_data;
BYTE *decrypted_data;
//char *filename = "../../sample_files/hubble_1.tif";
WORD key_schedule[60];
BYTE key[1][32] = {
{0x60,0x3d,0xeb,0x10,0x15,0xca,0x71,0xbe,0x2b,0x73,0xae,0xf0,0x85,0x7d,0x77,0x81,0x1f,0x35,0x2c,0x07,0x3b,0x61,0x08,0xd7,0x2d,0x98,0x10,0xa3,0x09,0x14,0xdf,0xf4}
};
struct stat st; // stat holds information about the file
size_t data_size_bytes = 0;
if (stat(filename, &st) == 0){ // if stat() succeeds on the input file
data_size_bytes = sizeof(BYTE) * st.st_size;
data = (BYTE *) malloc(data_size_bytes); // reserve memory the size of the input file for the data pointer
};
FILE *file = fopen(filename, "rb"); // open the file in binary ("rb") mode
// copy the image into the data array
if(data != NULL && file){ // proceed only if the data buffer was allocated and the file was opened
int current_byte = 0; // current byte index
// copy the file into the data array byte by byte
while(fread(&data[current_byte], sizeof(BYTE), 1, file) == 1){
current_byte += 1; // advance to the next byte
};
};
encrypted_data = (BYTE *) malloc(data_size_bytes); // reserve memory for the encrypted data
decrypted_data = (BYTE *) malloc(data_size_bytes); // likewise for the decrypted data
BYTE *d_data; // pointer to the input data on the device
BYTE *d_encrypted_data; // pointer to the encrypted data on the device
BYTE *d_decrypted_data; // pointer to the decrypted data on the device
// allocate device memory
cudaMalloc((void **)&d_data, data_size_bytes);
cudaMalloc((void **)&d_encrypted_data, data_size_bytes);
cudaMalloc((void **)&d_decrypted_data, data_size_bytes);
cudaMemcpy(d_data, data, data_size_bytes, cudaMemcpyHostToDevice); // copy the data array to the device
WORD *d_schedule;
cudaMalloc((void **)&d_schedule, sizeof(key_schedule));
int threadsPerBlock = 256;
int blocksPerGrid =((data_size_bytes + threadsPerBlock - 1) / threadsPerBlock)/AES_BLOCK_SIZE;
aes_key_setup(key[0], key_schedule, 256);
cudaMemcpy(d_schedule, key_schedule, sizeof(key_schedule), cudaMemcpyHostToDevice);
aes_encrypt<<<blocksPerGrid, threadsPerBlock>>>(d_data, d_encrypted_data, d_schedule, 256, data_size_bytes); // encrypt the buffer and store the result in d_encrypted_data
aes_decrypt<<<blocksPerGrid, threadsPerBlock>>>(d_encrypted_data, d_decrypted_data, d_schedule, 256, data_size_bytes); // decrypt d_encrypted_data and store the result in d_decrypted_data
cudaMemcpy(encrypted_data, d_encrypted_data, data_size_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(decrypted_data, d_decrypted_data, data_size_bytes, cudaMemcpyDeviceToHost);
// create the output files for the encrypted and decrypted data
FILE *enc_file = fopen("file.enc", "wb+");
FILE *dec_file = fopen("file.dec", "wb+");
// write the data to the corresponding files
fwrite(encrypted_data, sizeof(BYTE) * st.st_size, 1, enc_file);
fwrite(decrypted_data, sizeof(BYTE) * st.st_size, 1, dec_file);
// close the files
fclose(enc_file);
fclose(dec_file);
};
int main(int argc, char *argv[])
{
if (argc == 2) {
enc_dec_file(argv[1]);
}
/*
printf("AES Tests: %s\n", aes_test() ? "SUCCEEDED" : "FAILED");
*/
return(0);
}
|
bf5dbf3b573bec1369389d1b6cfe2c34a3481c75.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define MAX_CELL_PER_THREAD 1
__global__ void kernel_compute_gen_last_shared(uint8_t *matrix_src, uint8_t *matrix_dst, uint32_t rows, uint32_t cols) {
__shared__ int shared[3][128 + 2];
int ix = ((blockDim.x - 2) * blockIdx.x + threadIdx.x) & (cols - 1);
int iy = (blockIdx.y + threadIdx.y) & (rows - 1);
int id = iy * cols + ix;
int i = threadIdx.y;
int j = threadIdx.x;
uint8_t mine = matrix_src[id]; // keep cell in register
shared[i][j] = mine;
//shared[i][j] = matrix_src[id];
__syncthreads();
if (i == 1 && j > 0 && j < 129){
uint8_t aliveCells = shared[i + 1][j] + // lower
shared[i - 1][j] + // upper
shared[i][j + 1] + // right
shared[i][j - 1] + // left
shared[i + 1][j + 1] +
shared[i - 1][j - 1] + //diagonals
shared[i - 1][j + 1] +
shared[i + 1][j - 1];
matrix_dst[id] = (aliveCells == 3 || (aliveCells == 2 && mine)) ? 1 : 0;
}
}
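// Launch-geometry note (inferred, not stated in the original source): the indexing above
// appears to expect blockDim = (130, 3, 1), so shared[3][130] holds a 128-column tile plus
// a one-cell halo on each side, with a grid of roughly (cols / 128, rows) blocks; the
// & (cols - 1) / & (rows - 1) wrap-around also assumes rows and cols are powers of two.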
|
bf5dbf3b573bec1369389d1b6cfe2c34a3481c75.cu
|
#include "includes.h"
#define MAX_CELL_PER_THREAD 1
__global__ void kernel_compute_gen_last_shared(uint8_t *matrix_src, uint8_t *matrix_dst, uint32_t rows, uint32_t cols) {
__shared__ int shared[3][128 + 2];
int ix = ((blockDim.x - 2) * blockIdx.x + threadIdx.x) & (cols - 1);
int iy = (blockIdx.y + threadIdx.y) & (rows - 1);
int id = iy * cols + ix;
int i = threadIdx.y;
int j = threadIdx.x;
uint8_t mine = matrix_src[id]; // keep cell in register
shared[i][j] = mine;
//shared[i][j] = matrix_src[id];
__syncthreads();
if (i == 1 && j > 0 && j < 129){
uint8_t aliveCells = shared[i + 1][j] + // lower
shared[i - 1][j] + // upper
shared[i][j + 1] + // right
shared[i][j - 1] + // left
shared[i + 1][j + 1] +
shared[i - 1][j - 1] + //diagonals
shared[i - 1][j + 1] +
shared[i + 1][j - 1];
matrix_dst[id] = (aliveCells == 3 || (aliveCells == 2 && mine)) ? 1 : 0;
}
}
|
41ff27dccfbd56c7935842907e87558e65d154b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// fdk-ts-t.cu
// Threaded versions of FDK back-projection
// For detector index (t,s).
// Copyright 2008-10-09, Jeff Fessler, University of Michigan
#include "jf-cuda.h"
#include "def,fdk.h"
#include "fdk-gpu.h"
#include <pthread.h>
#include <stdio.h>
#include "mex.h"
#define TEXMEMO 1
#define MULT_GPU 0
#define USE_MASK 0
#if MULT_GPU
#define NUM_THREADS 4
void *PrintHello(void *threadid){
long tid;
tid = (long)threadid;
printf("Hello World! It's me, thread #%d! \n", tid);
return NULL;
}
#endif
#ifdef fdk_gpu
// New: use texture memory
texture< float, 2, hipReadModeElementType> texRef;
// New: use constant memory
__constant__ float dc_wx;
__constant__ float dc_wy;
__constant__ float dc_wz;
__constant__ float dc_ws;
__constant__ float dc_wt;
__constant__ int dc_nx;
__constant__ int dc_ny;
__constant__ int dc_nz;
__constant__ int dc_ns;
__constant__ int dc_nt;
__constant__ float dc_dx;
__constant__ float dc_dy;
__constant__ float dc_dz;
__constant__ float dc_ds;
__constant__ float dc_dt;
__constant__ float dc_dso;
__constant__ float dc_dsd;
__constant__ float dc_dfs;
__constant__ int dc_nxy;
//////////////////////////////////////////////////////////////////////////////////////
/// kernel_1
//////////////////////////////////////////////////////////////////////////////////////
#if USE_MASK
static __global__ void fdk_ts_back1_kernel( float *s_val, truf is_arc,float sinb,float cosb,cbyte *mask2) //source angle [radians]
#else
static __global__ void fdk_ts_back1_kernel( float *s_val, truf is_arc,float sinb,float cosb)
#endif
{
// index into image array
// determine the index of x, y
cint ix = blockIdx.x * blockDim.x + threadIdx.x;
cint iy = blockIdx.y * blockDim.y + threadIdx.y;
// if index is out of bound
#if USE_MASK
if (ix >= dc_nx || iy >= dc_ny || !mask2[ix + iy*dc_nx])
#else
if (ix >= dc_nx || iy >= dc_ny )
#endif
return;
cfloat yy = dc_dy * iy - dc_wy;
cfloat xx = dc_dx * ix - dc_wx;
cfloat ybetas = dc_dso - (-xx * sinb + yy * cosb);
cfloat xbeta = xx * cosb + yy * sinb;
cfloat mag = dc_dsd / ybetas;
float ss_bin;
float w2;
if ( is_arc ){
ss_bin = dc_dsd * atan2f(xbeta, ybetas) / dc_ds + dc_ws;
w2 = Sqr(dc_dsd) / (Sqr(ybetas) + Sqr(xbeta));
}
else{
ss_bin = mag * xbeta / dc_ds + dc_ws;
w2 = mag* mag ;
}
s_val[ix + iy*dc_nx] = (float)ss_bin;
s_val[ix + iy*dc_nx + dc_nxy] = (float)w2;
s_val[ix + iy*dc_nx + 2*dc_nxy] = (float)mag;
}
//////////////////////////////////////////////////////////////////////////////////////
/// kernel_2
//////////////////////////////////////////////////////////////////////////////////////
#if TEXMEMO
#if USE_MASK
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image,cbyte *mask2)
#else
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image)
#endif
{
cint ix = blockIdx.x % dc_nx;
cint iy = blockIdx.y;
cint iz = threadIdx.x+512*(dc_nz/512);
//NEW 5: use shared memory
__shared__ float ss_bin;
__shared__ float w2;
__shared__ float mag;
if (iz==1){
ss_bin = s_val[ix + iy*dc_nx];
w2 = s_val[ix + iy*dc_nx + dc_nxy];
mag = s_val[ix + iy*dc_nx + 2*dc_nxy];
}
__syncthreads();
//New: use texture memory for bilinear interpolation
float zz = dc_dz * iz - dc_wz;
float tt_bin = mag * zz / dc_dt + dc_wt;
#if USE_MASK
if (tt_bin < 0 || tt_bin > dc_nt || ss_bin < 0 || ss_bin > dc_ns|| ix>=dc_nx || iy>=dc_ny || (mask2[ix + iy*dc_nx] != 1))
#else
if ( ix>=dc_nx || iy>=dc_ny )
#endif
return;
image[(ix + iy * dc_nx) * dc_nz + iz] += w2 * tex2D(texRef, tt_bin , ss_bin );
}
#else
#if USE_MASK
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image, cfloat *proj,cbyte *mask2)
#else
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image, cfloat *proj)
#endif
{
// NEW 5:
// index into image array
cint ix = blockIdx.x % dc_nx;
cint iy = blockIdx.y;
// if more than 512, iz=threadIdx.x+512
// else iz=threadIdx.x
cint iz = threadIdx.x+512*(dc_nz/512);
//NEW 5: use shared memory
__shared__ float ss_bin;
__shared__ float w2;
__shared__ float mag;
if (iz==1){
ss_bin = s_val[ix + iy*dc_nx];
w2 = s_val[ix + iy*dc_nx + dc_nxy];
mag = s_val[ix + iy*dc_nx + 2*dc_nxy];
}
__syncthreads();
// index of s is "is"
// index of nearest neighbor in "s"
cint is = floorf(ss_bin);
// Check if index out of bound
#if USE_MASK
if (is < 0 || is >= dc_ns-1 || ix>=dc_nx || iy>=dc_ny || !(mask2[ix + iy*dc_nx])) // each thread does its part only
#else
if (is < 0 || is >= dc_ns-1 || ix>=dc_nx || iy>=dc_ny )
#endif
return;
// horizontal bilinear
cfloat wr = ss_bin - is;
// interpolation factors
cfloat wl = 1. - wr;
//find the image point
image += (ix + iy * dc_nx) * dc_nz + iz;
cfloat *pp1 = proj + is * dc_nt;
cfloat *pp2 = proj + (is+1) * dc_nt;
// vertical bilinear
cfloat zz = dc_dz * iz - dc_wz;
cfloat tt = mag * zz;
cfloat tt_bin = tt / dc_dt + dc_wt;
// z value is used to determine index of t "it" nearest nbr in "t"
cint it = floorf(tt_bin);
if (it < 0 || it >= dc_nt-1) // out of FOV
return;
// reconstructing the image
else {
cfloat wu = tt_bin - it;
cfloat wd = 1. - wu;
cfloat p1 = wl * pp1[it]
+ wr * pp2[it]; // interpolate
cfloat p2 = wl * pp1[it+1]
+ wr * pp2[it+1]; // horizontal
// final vertical interpolation:
*image += w2 * (wu * p1 + wd * p2);
}
}
#endif
#endif
typedef struct {
float *image; // [nz nx ny] <- trick!
const cbct_ig *ig; // image geometry
const cbct_cg *cg; // cone-beam CT system geometry
int na; // # of views
float *proj; // [nt ns na] <- trick! projection views
cdouble *beta; // [na] source angles [radians]
} fdk_ts_s;
#ifdef fdk_gpu
static int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////
/// fdk_ts_back_init()
//////////////////////////////////////////////////////////////////////////////////////
static sof fdk_ts_back_init(void *in, cint id, cint nthread)
{
fdk_ts_s *pa = (fdk_ts_s *) in;
const cbct_ig *ig = pa->ig;
const cbct_cg *cg = pa->cg;
cint na = pa->na;
float *proj = pa->proj;
cdouble *beta = pa->beta;
//calculate the data size
cint nst = cg->ns * cg->nt;
cint nxy = ig->nx * ig->ny;
cint nxyz = ig->nx * ig->ny * ig->nz;
//printf("Ox=%d Oy=%d Oz=%d Os=%d Ot=%d \n", ig->offset_x,ig->offset_y, ig->offset_z, cg->offset_s, cg->offset_t);
//calculate the offset
//note: wxyz are different with wst
cfloat wx = ig->dx *( (ig->nx-1)/2. + ig->offset_x );
cfloat wy = ig->dy *( (ig->ny-1)/2. + ig->offset_y );
cfloat wz = ig->dz *( (ig->nz-1)/2. + ig->offset_z );
cfloat ws = (cg->ns-1)/2. + cg->offset_s;
cfloat wt = (cg->nt-1)/2. + cg->offset_t;
//printf("wx=%d wy=%d wz=%d ws=%d wt=%d \n", wx, wy, wz, ws, wt);
#ifdef fdk_gpu
////prepare for multigpus
#if MULT_GPU
int dCnt = 0;
int selectedCudaDeviceId = 0;
hipGetDeviceCount(&dCnt) ;
printf("number of cuda gpu devices: %d\n", dCnt);
if (dCnt > 0) {
if (dCnt > 1) {
int multiprocessor_cnt = 0;
hipDeviceProp_t prop;
for (int deviceId=0; deviceId<dCnt; ++deviceId) {
if (hipSuccess == hipGetDeviceProperties(&prop, deviceId)) {
if (prop.multiProcessorCount > multiprocessor_cnt) {
multiprocessor_cnt = prop.multiProcessorCount;
selectedCudaDeviceId = deviceId;
}
}
}
} else {
selectedCudaDeviceId = 0;
}
printf("selected device with most multiprocessors: %d\n", selectedCudaDeviceId);
hipSetDevice(selectedCudaDeviceId);
}
pthread_t threads[NUM_THREADS];
int rc;
long t;
for(t=0 ; t< NUM_THREADS ; t++){
printf("In main: creating thread %ld\n", t);
rc = pthread_create(&threads[t], NULL, PrintHello, (void *)t);
if (rc){
printf("ERROR; return code from pthread_create() is %d\n", rc);
exit(-1);
}
}
#endif
///// Load all this stuff into graphics memory
// image memory on device
float *dev_img;
jf_gpu_malloc(dev_img, nxyz)
// initialize device image to 0
jf_gpu_memset(dev_img, 0, nxyz)
#if USE_MASK
byte *dev_mask2;
jf_gpu_malloc(dev_mask2, nxy) // 2D mask
jf_gpu_put(dev_mask2, ig->mask2, nxy)
#endif
#if TEXMEMO
//printf("Using texture memory \n");
//use texture memory
hipArray *dev_proj;
hipChannelFormatDesc input_tex = hipCreateChannelDesc<float>();
//hipChannelFormatDesc input_tex = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipMallocArray(&dev_proj, &input_tex, cg->nt, cg->ns);
texRef.addressMode[0] = hipAddressModeWrap;
texRef.addressMode[1] = hipAddressModeWrap;
texRef.filterMode = hipFilterModeLinear;
texRef.normalized = 0;
#else
//printf("Using global memory \n");
// one projection view on device
float *dev_proj;
jf_gpu_malloc(dev_proj, nst)
#endif
// s values for each x,y pair on device
float *dev_sval;
jf_gpu_malloc(dev_sval, nxy*3)
// initialize values to 0
jf_gpu_memset(dev_sval, 0, nxy)
//load the parameters to constant memory
hipMemcpyToSymbol( "dc_wx", &wx, sizeof(float) );
hipMemcpyToSymbol( "dc_wy", &wy, sizeof(float) );
hipMemcpyToSymbol( "dc_wz", &wz, sizeof(float) );
hipMemcpyToSymbol( "dc_ws", &ws, sizeof(float) );
hipMemcpyToSymbol( "dc_wt", &wt, sizeof(float) );
hipMemcpyToSymbol( "dc_nx", &(ig->nx), sizeof(int) );
hipMemcpyToSymbol( "dc_ny", &(ig->ny), sizeof(int) );
hipMemcpyToSymbol( "dc_nz", &(ig->nz), sizeof(int) );
hipMemcpyToSymbol( "dc_dx", &(ig->dx), sizeof(float) );
hipMemcpyToSymbol( "dc_dy", &(ig->dy), sizeof(float) );
hipMemcpyToSymbol( "dc_dz", &(ig->dz), sizeof(float) );
hipMemcpyToSymbol( "dc_ns", &(cg->ns), sizeof(int) );
hipMemcpyToSymbol( "dc_nt", &(cg->nt), sizeof(int) );
hipMemcpyToSymbol( "dc_ds", &(cg->ds), sizeof(float) );
hipMemcpyToSymbol( "dc_dt", &(cg->dt), sizeof(float) );
hipMemcpyToSymbol( "dc_dso", &(cg->dso), sizeof(float) );
hipMemcpyToSymbol( "dc_dsd", &(cg->dsd), sizeof(float) );
hipMemcpyToSymbol( "dc_dfs", &(cg->dfs), sizeof(float) );
hipMemcpyToSymbol( "dc_nxy", &(nxy), sizeof(int) );
	////decide the block and grid structure
dim3 dimBlock(16, 16);
dim3 dimGrid(iDivUp(ig->nx,dimBlock.x), iDivUp(ig->ny,dimBlock.y));
// NEW 5: case where nz <=512
// all the z's in the same block for a given (x,y)
dim3 dimBlock2(ig->nz, 1, 1);
int numBlock_x = ((ig->nz/512)+1)*ig->nx;
dim3 dimGrid2(numBlock_x, ig->ny);
#endif
//decide the shape of detector
truf is_arc = 0;
if (cg->dfs == 0)
is_arc = 1;
else if (!Isinf(cg->dfs))
Warn("dfs not done - junk!")
#ifdef fdk_gpu
for (int ia=0; ia < na; ia=ia+1, proj += nst) {
// copy this view to gpu
#if USE_MASK
#if TEXMEMO
hipMemcpyToArray(dev_proj, 0, 0, proj, nst*sizeof(float),hipMemcpyHostToDevice);
hipBindTextureToArray(texRef, dev_proj,input_tex);
hipLaunchKernelGGL(( fdk_ts_back1_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]),dev_mask2);
hipLaunchKernelGGL(( fdk_ts_back1_kernel_2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dev_sval,dev_img,dev_mask2);
#else
jf_gpu_put(dev_proj, proj, nst);
hipLaunchKernelGGL(( fdk_ts_back1_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]),dev_mask2);
hipLaunchKernelGGL(( fdk_ts_back1_kernel_2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dev_sval,dev_img,dev_proj,dev_mask2);
#endif
#else
#if TEXMEMO
hipMemcpyToArray(dev_proj, 0, 0, proj, nst*sizeof(float),hipMemcpyHostToDevice);
hipBindTextureToArray(texRef, dev_proj,input_tex);
hipLaunchKernelGGL(( fdk_ts_back1_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]));
hipLaunchKernelGGL(( fdk_ts_back1_kernel_2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dev_sval,dev_img);
#else
jf_gpu_put(dev_proj, proj, nst);
hipLaunchKernelGGL(( fdk_ts_back1_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]));
hipLaunchKernelGGL(( fdk_ts_back1_kernel_2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dev_sval,dev_img,dev_proj);
#endif
#endif
}
//Note("Copying image to host")
jf_gpu_get(pa->image, dev_img, nxyz) // caution: works only for 1 thread
//Note("freeing memory\n")
jf_gpu_free(dev_img)
hipFree(dev_proj);
// jf_gpu_free(dev_proj)
jf_gpu_free(dev_sval)
#if USE_MASK
jf_gpu_free(dev_mask2)
#endif
#else
for (int ia=0; ia < na; ++ia, proj += nst) {
float *dev_img = pa->image; // already zeroed
cfloat *dev_proj = proj;
//cbyte *dev_mask2 = ig->mask2;
if (!fdk_ts_back(dev_img,
ig->nx, ig->ny, ig->nz,
ig->dx, ig->dy, ig->dz,
wx, wy , wz,
// dev_mask2, id + 1, // each thread does some voxels only
cg->dso, cg->dsd, cg->dfs,
cg->ns, cg->nt,
cg->ds, cg->dt, ws, wt,
dev_proj, beta[ia]),is_arc)
Fail("fdk_ts_back()")
}
#endif
Ok
}
sof fdk_ts_back_t(
float *image, // [nz nx ny] <- trick!
const cbct_ig *ig,
const cbct_cg *cg,
cint na, // # of views
cfloat *proj, // [nt ns na] <- trick! projection views
cdouble *beta, // [na] source angles [radians]
cint nthread, // # of threads
cint chat)
{
fdk_ts_s st;
#define put(arg) st.arg = arg;
put(image)
put(ig)
put(cg)
put(na)
put(proj)
put(beta)
#undef put
// TESTING
// printf("MY FILE COOLNESS! \n");
Bzero(image, ig->nx * ig->ny * ig->nz) // initialize image volume to 0
// Call(jf_thread1_top, (fdk_ts_back_init, NULL /* wrap up */, &st, nthread, Chat))
fdk_ts_back_init(&st, 4, 0);
Ok
}
|
41ff27dccfbd56c7935842907e87558e65d154b4.cu
|
// fdk-ts-t.cu
// Threaded versions of FDK back-projection
// For detector index (t,s).
// Copyright 2008-10-09, Jeff Fessler, University of Michigan
#include "jf-cuda.h"
#include "def,fdk.h"
#include "fdk-gpu.h"
#include <pthread.h>
#include <stdio.h>
#include "mex.h"
#define TEXMEMO 1
#define MULT_GPU 0
#define USE_MASK 0
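// Compile-time switches:
//   TEXMEMO  1: sample each projection view through a 2D texture with
//               hardware bilinear filtering; 0: keep the view in global
//               memory and interpolate manually in kernel_2.
//   MULT_GPU 1: probe all CUDA devices, pick the one with the most
//               multiprocessors, and spawn the pthread demo; 0: use the
//               default device.
//   USE_MASK 1: pass ig->mask2 to the kernels and skip (x,y) columns
//               outside the 2D support mask; 0: back-project every column.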
#if MULT_GPU
#define NUM_THREADS 4
void *PrintHello(void *threadid){
long tid;
tid = (long)threadid;
printf("Hello World! It's me, thread #%d! \n", tid);
return NULL;
}
#endif
#ifdef fdk_gpu
//New: use texture memory
texture< float, 2, cudaReadModeElementType> texRef;
//New: use constant memory
__constant__ float dc_wx;
__constant__ float dc_wy;
__constant__ float dc_wz;
__constant__ float dc_ws;
__constant__ float dc_wt;
__constant__ int dc_nx;
__constant__ int dc_ny;
__constant__ int dc_nz;
__constant__ int dc_ns;
__constant__ int dc_nt;
__constant__ float dc_dx;
__constant__ float dc_dy;
__constant__ float dc_dz;
__constant__ float dc_ds;
__constant__ float dc_dt;
__constant__ float dc_dso;
__constant__ float dc_dsd;
__constant__ float dc_dfs;
__constant__ int dc_nxy;
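// These __constant__ variables mirror the image and detector geometry on
// the device; they are filled once per call by the cudaMemcpyToSymbol()
// sequence in fdk_ts_back_init() below, so both kernels can read the
// geometry without a long argument list.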
//////////////////////////////////////////////////////////////////////////////////////
/// kernel_1
//////////////////////////////////////////////////////////////////////////////////////
#if USE_MASK
static __global__ void fdk_ts_back1_kernel( float *s_val, truf is_arc,float sinb,float cosb,cbyte *mask2) //source angle [radians]
#else
static __global__ void fdk_ts_back1_kernel( float *s_val, truf is_arc,float sinb,float cosb)
#endif
{
// index into image array
// determine the index of x, y
cint ix = blockIdx.x * blockDim.x + threadIdx.x;
cint iy = blockIdx.y * blockDim.y + threadIdx.y;
// if index is out of bound
#if USE_MASK
if (ix >= dc_nx || iy >= dc_ny || !mask2[ix + iy*dc_nx])
#else
if (ix >= dc_nx || iy >= dc_ny )
#endif
return;
cfloat yy = dc_dy * iy - dc_wy;
cfloat xx = dc_dx * ix - dc_wx;
cfloat ybetas = dc_dso - (-xx * sinb + yy * cosb);
cfloat xbeta = xx * cosb + yy * sinb;
cfloat mag = dc_dsd / ybetas;
float ss_bin;
float w2;
if ( is_arc ){
ss_bin = dc_dsd * atan2f(xbeta, ybetas) / dc_ds + dc_ws;
w2 = Sqr(dc_dsd) / (Sqr(ybetas) + Sqr(xbeta));
}
else{
ss_bin = mag * xbeta / dc_ds + dc_ws;
w2 = mag* mag ;
}
s_val[ix + iy*dc_nx] = (float)ss_bin;
s_val[ix + iy*dc_nx + dc_nxy] = (float)w2;
s_val[ix + iy*dc_nx + 2*dc_nxy] = (float)mag;
}
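// Two-pass back-projection summary:
//   kernel_1 (above) runs once per (x,y) column and caches three values in
//   s_val[3*nx*ny]: ss_bin (detector channel coordinate, arc or flat),
//   w2 (cone-beam distance weight) and mag (the magnification used to map
//   a voxel z to the detector row coordinate t).
//   kernel_2 (below) then runs one thread per z sample of the same column
//   and accumulates w2 * P(t,s) into the image, sampling P either through
//   the texture (TEXMEMO) or by manual bilinear interpolation.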
//////////////////////////////////////////////////////////////////////////////////////
/// kernel_2
//////////////////////////////////////////////////////////////////////////////////////
#if TEXMEMO
#if USE_MASK
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image,cbyte *mask2)
#else
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image)
#endif
{
cint ix = blockIdx.x % dc_nx;
cint iy = blockIdx.y;
cint iz = threadIdx.x+512*(dc_nz/512);
//NEW 5: use shared memory
__shared__ float ss_bin;
__shared__ float w2;
__shared__ float mag;
if (iz==1){
ss_bin = s_val[ix + iy*dc_nx];
w2 = s_val[ix + iy*dc_nx + dc_nxy];
mag = s_val[ix + iy*dc_nx + 2*dc_nxy];
}
__syncthreads();
//New: use texture memory for bilinear interpolation
float zz = dc_dz * iz - dc_wz;
float tt_bin = mag * zz / dc_dt + dc_wt;
#if USE_MASK
if (tt_bin < 0 || tt_bin > dc_nt || ss_bin < 0 || ss_bin > dc_ns|| ix>=dc_nx || iy>=dc_ny || (mask2[ix + iy*dc_nx] != 1))
#else
if ( ix>=dc_nx || iy>=dc_ny )
#endif
return;
image[(ix + iy * dc_nx) * dc_nz + iz] += w2 * tex2D(texRef, tt_bin , ss_bin );
}
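// With TEXMEMO the (t,s) bilinear interpolation is done in hardware by
// tex2D(), so the kernel above only applies the w2 weight; the #else
// branch below reproduces the same interpolation explicitly from the
// projection view held in global memory.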
#else
#if USE_MASK
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image, cfloat *proj,cbyte *mask2)
#else
__global__ void fdk_ts_back1_kernel_2(float *s_val, float *image, cfloat *proj)
#endif
{
// NEW 5:
// index into image array
cint ix = blockIdx.x % dc_nx;
cint iy = blockIdx.y;
	// if nz > 512: iz = threadIdx.x + 512
	// otherwise:   iz = threadIdx.x
cint iz = threadIdx.x+512*(dc_nz/512);
//NEW 5: use shared memory
__shared__ float ss_bin;
__shared__ float w2;
__shared__ float mag;
if (iz==1){
ss_bin = s_val[ix + iy*dc_nx];
w2 = s_val[ix + iy*dc_nx + dc_nxy];
mag = s_val[ix + iy*dc_nx + 2*dc_nxy];
}
__syncthreads();
// index of s is "is"
// index of nearest neighbor in "s"
cint is = floorf(ss_bin);
// Check if index out of bound
#if USE_MASK
if (is < 0 || is >= dc_ns-1 || ix>=dc_nx || iy>=dc_ny || !(mask2[ix + iy*dc_nx])) // each thread does its part only
#else
if (is < 0 || is >= dc_ns-1 || ix>=dc_nx || iy>=dc_ny )
#endif
return;
// horizontal bilinear
cfloat wr = ss_bin - is;
// interpolation factors
cfloat wl = 1. - wr;
//find the image point
image += (ix + iy * dc_nx) * dc_nz + iz;
cfloat *pp1 = proj + is * dc_nt;
cfloat *pp2 = proj + (is+1) * dc_nt;
	//vertical bilinear
cfloat zz = dc_dz * iz - dc_wz;
cfloat tt = mag * zz;
cfloat tt_bin = tt / dc_dt + dc_wt;
// z value is used to determine index of t "it" nearest nbr in "t"
cint it = floorf(tt_bin);
if (it < 0 || it >= dc_nt-1) // out of FOV
return;
// reconstructing the image
else {
cfloat wu = tt_bin - it;
cfloat wd = 1. - wu;
cfloat p1 = wl * pp1[it]
+ wr * pp2[it]; // interpolate
cfloat p2 = wl * pp1[it+1]
+ wr * pp2[it+1]; // horizontal
// final vertical interpolation:
*image += w2 * (wu * p1 + wd * p2);
}
}
#endif
#endif
typedef struct {
float *image; // [nz nx ny] <- trick!
const cbct_ig *ig; // image geometry
const cbct_cg *cg; // cone-beam CT system geometry
int na; // # of views
float *proj; // [nt ns na] <- trick! projection views
cdouble *beta; // [na] source angles [radians]
} fdk_ts_s;
#ifdef fdk_gpu
static int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
#endif
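// Example (illustrative numbers): with nx = 200 and a 16x16 thread block,
// iDivUp(200, 16) = 13 blocks along x, so 13*16 = 208 threads cover the
// 200 columns and kernel_1 discards the extra 8 via its ix >= dc_nx check.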
//////////////////////////////////////////////////////////////////////////////////////
/// fdk_ts_back_init()
//////////////////////////////////////////////////////////////////////////////////////
static sof fdk_ts_back_init(void *in, cint id, cint nthread)
{
fdk_ts_s *pa = (fdk_ts_s *) in;
const cbct_ig *ig = pa->ig;
const cbct_cg *cg = pa->cg;
cint na = pa->na;
float *proj = pa->proj;
cdouble *beta = pa->beta;
//calculate the data size
cint nst = cg->ns * cg->nt;
cint nxy = ig->nx * ig->ny;
cint nxyz = ig->nx * ig->ny * ig->nz;
//printf("Ox=%d Oy=%d Oz=%d Os=%d Ot=%d \n", ig->offset_x,ig->offset_y, ig->offset_z, cg->offset_s, cg->offset_t);
//calculate the offset
	//note: wxyz are different from wst
cfloat wx = ig->dx *( (ig->nx-1)/2. + ig->offset_x );
cfloat wy = ig->dy *( (ig->ny-1)/2. + ig->offset_y );
cfloat wz = ig->dz *( (ig->nz-1)/2. + ig->offset_z );
cfloat ws = (cg->ns-1)/2. + cg->offset_s;
cfloat wt = (cg->nt-1)/2. + cg->offset_t;
//printf("wx=%d wy=%d wz=%d ws=%d wt=%d \n", wx, wy, wz, ws, wt);
#ifdef fdk_gpu
////prepare for multigpus
#if MULT_GPU
int dCnt = 0;
int selectedCudaDeviceId = 0;
cudaGetDeviceCount(&dCnt) ;
printf("number of cuda gpu devices: %d\n", dCnt);
if (dCnt > 0) {
if (dCnt > 1) {
int multiprocessor_cnt = 0;
cudaDeviceProp prop;
for (int deviceId=0; deviceId<dCnt; ++deviceId) {
if (cudaSuccess == cudaGetDeviceProperties(&prop, deviceId)) {
if (prop.multiProcessorCount > multiprocessor_cnt) {
multiprocessor_cnt = prop.multiProcessorCount;
selectedCudaDeviceId = deviceId;
}
}
}
} else {
selectedCudaDeviceId = 0;
}
printf("selected device with most multiprocessors: %d\n", selectedCudaDeviceId);
cudaSetDevice(selectedCudaDeviceId);
}
pthread_t threads[NUM_THREADS];
int rc;
long t;
for(t=0 ; t< NUM_THREADS ; t++){
printf("In main: creating thread %ld\n", t);
rc = pthread_create(&threads[t], NULL, PrintHello, (void *)t);
if (rc){
printf("ERROR; return code from pthread_create() is %d\n", rc);
exit(-1);
}
}
#endif
///// Load all this stuff into graphics memory
// image memory on device
float *dev_img;
jf_gpu_malloc(dev_img, nxyz)
// initialize device image to 0
jf_gpu_memset(dev_img, 0, nxyz)
#if USE_MASK
byte *dev_mask2;
jf_gpu_malloc(dev_mask2, nxy) // 2D mask
jf_gpu_put(dev_mask2, ig->mask2, nxy)
#endif
#if TEXMEMO
//printf("Using texture memory \n");
//use texture memory
cudaArray *dev_proj;
cudaChannelFormatDesc input_tex = cudaCreateChannelDesc<float>();
//cudaChannelFormatDesc input_tex = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaMallocArray(&dev_proj, &input_tex, cg->nt, cg->ns);
texRef.addressMode[0] = cudaAddressModeWrap;
texRef.addressMode[1] = cudaAddressModeWrap;
texRef.filterMode = cudaFilterModeLinear;
texRef.normalized = 0;
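	// Note: the CUDA array is allocated nt wide and ns high, which matches
	// tex2D(texRef, tt_bin, ss_bin) in kernel_2, and normalized = 0 means
	// the kernel addresses the texture directly in detector-bin units.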
#else
//printf("Using global memory \n");
// one projection view on device
float *dev_proj;
jf_gpu_malloc(dev_proj, nst)
#endif
// s values for each x,y pair on device
float *dev_sval;
jf_gpu_malloc(dev_sval, nxy*3)
// initialize values to 0
jf_gpu_memset(dev_sval, 0, nxy)
//load the parameters to constant memory
cudaMemcpyToSymbol( "dc_wx", &wx, sizeof(float) );
cudaMemcpyToSymbol( "dc_wy", &wy, sizeof(float) );
cudaMemcpyToSymbol( "dc_wz", &wz, sizeof(float) );
cudaMemcpyToSymbol( "dc_ws", &ws, sizeof(float) );
cudaMemcpyToSymbol( "dc_wt", &wt, sizeof(float) );
cudaMemcpyToSymbol( "dc_nx", &(ig->nx), sizeof(int) );
cudaMemcpyToSymbol( "dc_ny", &(ig->ny), sizeof(int) );
cudaMemcpyToSymbol( "dc_nz", &(ig->nz), sizeof(int) );
cudaMemcpyToSymbol( "dc_dx", &(ig->dx), sizeof(float) );
cudaMemcpyToSymbol( "dc_dy", &(ig->dy), sizeof(float) );
cudaMemcpyToSymbol( "dc_dz", &(ig->dz), sizeof(float) );
cudaMemcpyToSymbol( "dc_ns", &(cg->ns), sizeof(int) );
cudaMemcpyToSymbol( "dc_nt", &(cg->nt), sizeof(int) );
cudaMemcpyToSymbol( "dc_ds", &(cg->ds), sizeof(float) );
cudaMemcpyToSymbol( "dc_dt", &(cg->dt), sizeof(float) );
cudaMemcpyToSymbol( "dc_dso", &(cg->dso), sizeof(float) );
cudaMemcpyToSymbol( "dc_dsd", &(cg->dsd), sizeof(float) );
cudaMemcpyToSymbol( "dc_dfs", &(cg->dfs), sizeof(float) );
cudaMemcpyToSymbol( "dc_nxy", &(nxy), sizeof(int) );
	////decide the block and grid structure
dim3 dimBlock(16, 16);
dim3 dimGrid(iDivUp(ig->nx,dimBlock.x), iDivUp(ig->ny,dimBlock.y));
// NEW 5: case where nz <=512
// all the z's in the same block for a given (x,y)
dim3 dimBlock2(ig->nz, 1, 1);
int numBlock_x = ((ig->nz/512)+1)*ig->nx;
dim3 dimGrid2(numBlock_x, ig->ny);
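	// Launch geometry: kernel_1 uses 16x16 tiles over the (x,y) plane;
	// kernel_2 gets one block per (x,y) column (nx*ny blocks when nz <= 512)
	// with nz threads each, so every thread handles a single z sample.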
#endif
//decide the shape of detector
truf is_arc = 0;
if (cg->dfs == 0)
is_arc = 1;
else if (!Isinf(cg->dfs))
Warn("dfs not done - junk!")
#ifdef fdk_gpu
for (int ia=0; ia < na; ia=ia+1, proj += nst) {
// copy this view to gpu
#if USE_MASK
#if TEXMEMO
cudaMemcpyToArray(dev_proj, 0, 0, proj, nst*sizeof(float),cudaMemcpyHostToDevice);
cudaBindTextureToArray(texRef, dev_proj,input_tex);
fdk_ts_back1_kernel<<<dimGrid, dimBlock>>>(dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]),dev_mask2);
fdk_ts_back1_kernel_2<<<dimGrid2, dimBlock2>>>( dev_sval,dev_img,dev_mask2);
#else
jf_gpu_put(dev_proj, proj, nst);
fdk_ts_back1_kernel<<<dimGrid, dimBlock>>>(dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]),dev_mask2);
fdk_ts_back1_kernel_2<<<dimGrid2, dimBlock2>>>( dev_sval,dev_img,dev_proj,dev_mask2);
#endif
#else
#if TEXMEMO
cudaMemcpyToArray(dev_proj, 0, 0, proj, nst*sizeof(float),cudaMemcpyHostToDevice);
cudaBindTextureToArray(texRef, dev_proj,input_tex);
fdk_ts_back1_kernel<<<dimGrid, dimBlock>>>(dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]));
fdk_ts_back1_kernel_2<<<dimGrid2, dimBlock2>>>( dev_sval,dev_img);
#else
jf_gpu_put(dev_proj, proj, nst);
fdk_ts_back1_kernel<<<dimGrid, dimBlock>>>(dev_sval,is_arc,sinf(beta[ia]),cosf(beta[ia]));
fdk_ts_back1_kernel_2<<<dimGrid2, dimBlock2>>>( dev_sval,dev_img,dev_proj);
#endif
#endif
}
//Note("Copying image to host")
jf_gpu_get(pa->image, dev_img, nxyz) // caution: works only for 1 thread
//Note("freeing memory\n")
jf_gpu_free(dev_img)
cudaFree(dev_proj);
// jf_gpu_free(dev_proj)
jf_gpu_free(dev_sval)
#if USE_MASK
jf_gpu_free(dev_mask2)
#endif
#else
for (int ia=0; ia < na; ++ia, proj += nst) {
float *dev_img = pa->image; // already zeroed
cfloat *dev_proj = proj;
//cbyte *dev_mask2 = ig->mask2;
if (!fdk_ts_back(dev_img,
ig->nx, ig->ny, ig->nz,
ig->dx, ig->dy, ig->dz,
wx, wy , wz,
// dev_mask2, id + 1, // each thread does some voxels only
cg->dso, cg->dsd, cg->dfs,
cg->ns, cg->nt,
cg->ds, cg->dt, ws, wt,
dev_proj, beta[ia]),is_arc)
Fail("fdk_ts_back()")
}
#endif
Ok
}
sof fdk_ts_back_t(
float *image, // [nz nx ny] <- trick!
const cbct_ig *ig,
const cbct_cg *cg,
cint na, // # of views
cfloat *proj, // [nt ns na] <- trick! projection views
cdouble *beta, // [na] source angles [radians]
cint nthread, // # of threads
cint chat)
{
fdk_ts_s st;
#define put(arg) st.arg = arg;
put(image)
put(ig)
put(cg)
put(na)
put(proj)
put(beta)
#undef put
// TESTING
// printf("MY FILE COOLNESS! \n");
Bzero(image, ig->nx * ig->ny * ig->nz) // initialize image volume to 0
// Call(jf_thread1_top, (fdk_ts_back_init, NULL /* wrap up */, &st, nthread, Chat))
fdk_ts_back_init(&st, 4, 0);
Ok
}
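/*
 * Illustrative host-side usage sketch (not part of the original file):
 * the geometry field names below are taken from their use above, but the
 * full cbct_ig / cbct_cg definitions live in def,fdk.h, so treat this only
 * as a sketch of the calling convention of fdk_ts_back_t().
 *
 *	cbct_ig ig = { ... };	// nx, ny, nz, dx, dy, dz, offset_x/y/z, mask2
 *	cbct_cg cg = { ... };	// ns, nt, ds, dt, dso, dsd, dfs, offset_s/t
 *	int na = 360;		// number of views
 *	float *image = ...;	// [nz nx ny] output volume
 *	float *proj = ...;	// [nt ns na] projection views
 *	double *beta = ...;	// [na] source angles [radians]
 *	if (!fdk_ts_back_t(image, &ig, &cg, na, proj, beta, 1, 0))
 *		Fail("fdk_ts_back_t()")
 */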
|
ca3e7cd99436a436e53582a8ee36e373754469fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "utils.hpp"
#include "update.hpp"
#include "memoryManager.hpp"
#include "cuStinger.hpp"
using namespace std;
#include <stdio.h>
#include <string.h>
// void initializeCuStinger(cuStingerConfig);
// __global__ void devInitVertexData(cuStinger* custing,uint8_t* temp)
__global__ void devInitVertexData(cuStinger::cusVertexData *dVD,vertexId_t nv,uint8_t* temp)
{
if(threadIdx.x!=0 || blockIdx.x!=0)
DEV_CUSTINGER_ERROR("Number of threads and thread blocks for initializing vertex should always be one");
// cuStinger::cusVertexData *dVD = custing->dVD;
dVD->mem = temp;
int32_t pos=0;
dVD->adj = (cuStinger::cusEdgeData**)(dVD->getMem() + pos); pos+=sizeof(cuStinger::cusEdgeData*)*nv;
dVD->edMem = (uint8_t**)(dVD->getMem() + pos); pos+=sizeof(uint8_t*)*nv;
dVD->used = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv;
dVD->max = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv;
dVD->vw = (vweight_t*)(dVD->getMem() + pos); pos+=sizeof(vweight_t)*nv;
dVD->vt = (vtype_t*)(dVD->getMem() + pos); pos+=sizeof(vtype_t)*nv;
}
void cuStinger::initVertexDataPointers(cuStinger::cusVertexData *dVD, uint8_t* temp){
// devInitVertexData<<<1,1>>>( d_cuStinger,temp);
hipLaunchKernelGGL(( devInitVertexData), dim3(1),dim3(1), 0, 0, dVD,nv,temp);
}
__global__ void devInitEdgeData(cuStinger* custing, int verticesPerThreadBlock){
vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x;
length_t nv = custing->getMaxNV();
for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat+=blockDim.x){
vertexId_t v=v_init+v_hat;
if(v>=nv)
break;
//epv = edge per vertex
length_t epv = custing->dVD->getMax()[v];
int32_t pos=0;
cuStinger::cusEdgeData *dED = custing->dVD->adj[v];
dED->mem = custing->dVD->edMem[v];
dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv;
dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv;
dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv;
dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
}
}
void cuStinger::initEdgeDataPointers(){
dim3 numBlocks(1, 1);
int32_t threads=64;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)nv/(float)threads);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
int32_t verticesPerThreadBlock = threads;
if(numBlocks.x>1)
verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x-1));
hipLaunchKernelGGL(( devInitEdgeData), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_cuStinger,verticesPerThreadBlock);
}
__global__ void devMakeGPUStinger(vertexId_t* d_off, length_t* d_adj,
int verticesPerThreadBlock,cuStinger* custing){
length_t* d_utilized = custing->dVD->getUsed();
length_t* d_max = custing->dVD->getMax();
int32_t v_init=blockIdx.x*verticesPerThreadBlock;
for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){
int32_t v=v_init+v_hat;
if(v>=custing->nv)
break;
cuStinger::cusEdgeData* adjv = custing->dVD->adj[v];
for(int32_t e=threadIdx.x; e<d_utilized[v]; e+=blockDim.x){
adjv->dst[e]=d_adj[d_off[v]+e];
}
for(int32_t e=threadIdx.x + d_utilized[v]; e < d_max[v]; e+=blockDim.x){
adjv->dst[e]=DELETION_MARKER;
}
}
}
void cuStinger::internalCSRTocuStinger(length_t* h_off, vertexId_t* h_adj, length_t ne){
length_t* d_off = (length_t*)allocDeviceArray(nv+1,sizeof(length_t));
vertexId_t* d_adj = (vertexId_t*)allocDeviceArray(ne,sizeof(vertexId_t));
copyArrayHostToDevice(h_off,d_off,nv+1,sizeof(length_t));
copyArrayHostToDevice(h_adj,d_adj,ne,sizeof(vertexId_t));
dim3 numBlocks(1, 1);
int32_t threads=64;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)nv/(float)threads);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
int32_t verticesPerThreadBlock;
if(numBlocks.x==1)
verticesPerThreadBlock=nv;
else
verticesPerThreadBlock= ceil(float(nv)/float(numBlocks.x-1));
hipLaunchKernelGGL(( devMakeGPUStinger), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_off,d_adj,verticesPerThreadBlock, d_cuStinger);
freeDeviceArray(d_adj);
freeDeviceArray(d_off);
}
#define SUM_BLOCK_SIZE 512
__global__ void devSumArray(length_t * input, length_t * output, length_t len) {
__shared__ length_t partialSum[2 * SUM_BLOCK_SIZE];
//Load a segment of the input vector into shared memory
length_t tid = threadIdx.x, start = 2 * blockIdx.x * SUM_BLOCK_SIZE;
if (start + tid < len)
partialSum[tid] = input[start + tid];
else
partialSum[tid] = 0;
if (start + SUM_BLOCK_SIZE + tid < len)
partialSum[SUM_BLOCK_SIZE + tid] = input[start + SUM_BLOCK_SIZE + tid];
else
partialSum[SUM_BLOCK_SIZE + tid] = 0;
//Traverse the reduction tree
for (int stride = SUM_BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (tid < stride)
partialSum[tid] += partialSum[tid+stride];
}
//Write the computed sum of the block to the output vector at the correct index
if (tid == 0)
output[blockIdx.x] = partialSum[0];
}
length_t cuStinger::sumDeviceArray(length_t* arr, length_t len){
length_t numOutputElements = len / (SUM_BLOCK_SIZE<<1);
if (len % (SUM_BLOCK_SIZE<<1)) {
numOutputElements++;
}
length_t* d_out = (length_t*)allocDeviceArray(len, sizeof(length_t*));
hipLaunchKernelGGL(( devSumArray), dim3(numOutputElements),dim3(SUM_BLOCK_SIZE), 0, 0, arr,d_out,len);
length_t* h_out = (length_t*)allocHostArray(len, sizeof(length_t*));
length_t sum=0;
copyArrayDeviceToHost(d_out, h_out, len, sizeof(length_t));
for(int i=0; i<numOutputElements; i++){
sum+=h_out[i];
}
freeHostArray(h_out);
freeDeviceArray(d_out);
return sum;
}
__global__ void deviceCopyMultipleAdjacencies(cuStinger* custing, cuStinger::cusVertexData* olddVD,
vertexId_t* requireUpdates, length_t requireCount ,length_t verticesPerThreadBlock)
{
// int32_t** d_cuadj = custing->d_adj;
// length_t* d_utilized = custing->getDeviceUsed();
length_t v_init=blockIdx.x*verticesPerThreadBlock;
for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){
if((v_init+v_hat)>=requireCount)
break;
vertexId_t v=requireUpdates[v_init+v_hat];
cuStinger::cusEdgeData *dED = custing->dVD->adj[v];
cuStinger::cusEdgeData *olddED = olddVD->adj[v];
//epv = edge per vertex
length_t epv = olddVD->getMax()[v];
int32_t pos=0;
dED->mem = custing->dVD->edMem[v];
dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv;
dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv;
dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv;
dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
__syncthreads();
for(length_t e=threadIdx.x; e<olddVD->getUsed()[v]; e+=blockDim.x){
dED->dst[e] = olddED->dst[e];
if(custing->isSemantic){
dED->ew[e] = olddED->ew[e];
}
else if(custing->useEWeight){
dED->ew[e] = olddED->ew[e];
dED->et[e] = olddED->et[e];
dED->t1[e] = olddED->t1[e];
dED->t2[e] = olddED->t1[e];
}
}
}
}
void cuStinger::copyMultipleAdjacencies(cusVertexData* olddVD,
vertexId_t* requireUpdates, length_t requireCount){
dim3 numBlocks(1, 1);
int32_t threads=32;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)requireCount);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
int32_t verticesPerThreadBlock;
if(numBlocks.x == requireCount)
verticesPerThreadBlock=1;
else
verticesPerThreadBlock = ceil(float(requireCount)/float(numBlocks.x-1));
hipLaunchKernelGGL(( deviceCopyMultipleAdjacencies), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_cuStinger,
olddVD, requireUpdates, requireCount, verticesPerThreadBlock);
checkLastCudaError("Error in the first update sweep");
}
__global__ void deviceCheckForDuplicateEdges(cuStinger* custing, length_t verticesPerThreadBlock)
{
vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x;
length_t nv = custing->getMaxNV();
__shared__ int dupFound;
for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){
vertexId_t v=v_init+v_hat;
if(v>=nv)
break;
length_t edges = custing->dVD->getUsed()[v];
cuStinger::cusEdgeData *dED = custing->dVD->adj[v];
// if(v ==45788 && threadIdx.x==0){
// for(length_t e=0; e<edges; e++)
// printf("%d ,",dED->dst[e]);
// printf("\n");
// }
for (length_t e=0; e<edges; e++){
vertexId_t currDest=dED->dst[e];
dupFound=-1;
__syncthreads();
for (length_t e2=0; e2<edges; e2+=blockDim.x){
vertexId_t currDest2 = dED->dst[e2];
if(currDest==currDest2 && e!=e2){
dupFound=e2;
}
}
__syncthreads();
if(dupFound!=-1)
printf("DUP FOUND IN CUSTIGER\n");
}
}
}
void cuStinger::checkDuplicateEdges(){
dim3 numBlocks(1, 1);
int32_t threads=32;
dim3 threadsPerBlock(threads, 1);
int32_t verticesPerThreadBlock;
numBlocks.x = ceil((float)nv/(float)threads);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x));
// cout << "checkDuplicateEdges : " << verticesPerThreadBlock<< endl;
// cout << "checkDuplicateEdges : " << numBlocks.x << endl;
// cout << "Deletions : " << threadsPerBlock.x << endl;
hipLaunchKernelGGL(( deviceCheckForDuplicateEdges), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_cuStinger,
verticesPerThreadBlock);
checkLastCudaError("Error in the first update sweep");
}
|
ca3e7cd99436a436e53582a8ee36e373754469fe.cu
|
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "utils.hpp"
#include "update.hpp"
#include "memoryManager.hpp"
#include "cuStinger.hpp"
using namespace std;
#include <stdio.h>
#include <string.h>
// void initializeCuStinger(cuStingerConfig);
// __global__ void devInitVertexData(cuStinger* custing,uint8_t* temp)
__global__ void devInitVertexData(cuStinger::cusVertexData *dVD,vertexId_t nv,uint8_t* temp)
{
if(threadIdx.x!=0 || blockIdx.x!=0)
DEV_CUSTINGER_ERROR("Number of threads and thread blocks for initializing vertex should always be one");
// cuStinger::cusVertexData *dVD = custing->dVD;
dVD->mem = temp;
int32_t pos=0;
dVD->adj = (cuStinger::cusEdgeData**)(dVD->getMem() + pos); pos+=sizeof(cuStinger::cusEdgeData*)*nv;
dVD->edMem = (uint8_t**)(dVD->getMem() + pos); pos+=sizeof(uint8_t*)*nv;
dVD->used = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv;
dVD->max = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv;
dVD->vw = (vweight_t*)(dVD->getMem() + pos); pos+=sizeof(vweight_t)*nv;
dVD->vt = (vtype_t*)(dVD->getMem() + pos); pos+=sizeof(vtype_t)*nv;
}
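// devInitVertexData runs as a single thread: it takes one flat device
// buffer (temp) and carves it into the structure-of-arrays fields of
// cusVertexData (adj, edMem, used, max, vw, vt) by advancing a byte
// offset, so all per-vertex metadata lives in a single allocation.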
void cuStinger::initVertexDataPointers(cuStinger::cusVertexData *dVD, uint8_t* temp){
// devInitVertexData<<<1,1>>>( d_cuStinger,temp);
devInitVertexData<<<1,1>>>( dVD,nv,temp);
}
__global__ void devInitEdgeData(cuStinger* custing, int verticesPerThreadBlock){
vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x;
length_t nv = custing->getMaxNV();
for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat+=blockDim.x){
vertexId_t v=v_init+v_hat;
if(v>=nv)
break;
//epv = edge per vertex
length_t epv = custing->dVD->getMax()[v];
int32_t pos=0;
cuStinger::cusEdgeData *dED = custing->dVD->adj[v];
dED->mem = custing->dVD->edMem[v];
dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv;
dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv;
dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv;
dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
}
}
void cuStinger::initEdgeDataPointers(){
dim3 numBlocks(1, 1);
int32_t threads=64;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)nv/(float)threads);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
int32_t verticesPerThreadBlock = threads;
if(numBlocks.x>1)
verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x-1));
devInitEdgeData<<<numBlocks,threadsPerBlock>>>( d_cuStinger,verticesPerThreadBlock);
}
__global__ void devMakeGPUStinger(vertexId_t* d_off, length_t* d_adj,
int verticesPerThreadBlock,cuStinger* custing){
length_t* d_utilized = custing->dVD->getUsed();
length_t* d_max = custing->dVD->getMax();
int32_t v_init=blockIdx.x*verticesPerThreadBlock;
for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){
int32_t v=v_init+v_hat;
if(v>=custing->nv)
break;
cuStinger::cusEdgeData* adjv = custing->dVD->adj[v];
for(int32_t e=threadIdx.x; e<d_utilized[v]; e+=blockDim.x){
adjv->dst[e]=d_adj[d_off[v]+e];
}
for(int32_t e=threadIdx.x + d_utilized[v]; e < d_max[v]; e+=blockDim.x){
adjv->dst[e]=DELETION_MARKER;
}
}
}
void cuStinger::internalCSRTocuStinger(length_t* h_off, vertexId_t* h_adj, length_t ne){
length_t* d_off = (length_t*)allocDeviceArray(nv+1,sizeof(length_t));
vertexId_t* d_adj = (vertexId_t*)allocDeviceArray(ne,sizeof(vertexId_t));
copyArrayHostToDevice(h_off,d_off,nv+1,sizeof(length_t));
copyArrayHostToDevice(h_adj,d_adj,ne,sizeof(vertexId_t));
dim3 numBlocks(1, 1);
int32_t threads=64;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)nv/(float)threads);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
int32_t verticesPerThreadBlock;
if(numBlocks.x==1)
verticesPerThreadBlock=nv;
else
verticesPerThreadBlock= ceil(float(nv)/float(numBlocks.x-1));
devMakeGPUStinger<<<numBlocks,threadsPerBlock>>>(d_off,d_adj,verticesPerThreadBlock, d_cuStinger);
freeDeviceArray(d_adj);
freeDeviceArray(d_off);
}
#define SUM_BLOCK_SIZE 512
__global__ void devSumArray(length_t * input, length_t * output, length_t len) {
__shared__ length_t partialSum[2 * SUM_BLOCK_SIZE];
//Load a segment of the input vector into shared memory
length_t tid = threadIdx.x, start = 2 * blockIdx.x * SUM_BLOCK_SIZE;
if (start + tid < len)
partialSum[tid] = input[start + tid];
else
partialSum[tid] = 0;
if (start + SUM_BLOCK_SIZE + tid < len)
partialSum[SUM_BLOCK_SIZE + tid] = input[start + SUM_BLOCK_SIZE + tid];
else
partialSum[SUM_BLOCK_SIZE + tid] = 0;
//Traverse the reduction tree
for (int stride = SUM_BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (tid < stride)
partialSum[tid] += partialSum[tid+stride];
}
//Write the computed sum of the block to the output vector at the correct index
if (tid == 0)
output[blockIdx.x] = partialSum[0];
}
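// devSumArray: each block loads 2*SUM_BLOCK_SIZE elements into shared
// memory (zero-padding past len), folds them with a shared-memory tree
// reduction, and writes one partial sum per block; sumDeviceArray() below
// copies the partial sums back and finishes the addition on the host.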
length_t cuStinger::sumDeviceArray(length_t* arr, length_t len){
length_t numOutputElements = len / (SUM_BLOCK_SIZE<<1);
if (len % (SUM_BLOCK_SIZE<<1)) {
numOutputElements++;
}
length_t* d_out = (length_t*)allocDeviceArray(len, sizeof(length_t*));
devSumArray<<<numOutputElements,SUM_BLOCK_SIZE>>>(arr,d_out,len);
length_t* h_out = (length_t*)allocHostArray(len, sizeof(length_t*));
length_t sum=0;
copyArrayDeviceToHost(d_out, h_out, len, sizeof(length_t));
for(int i=0; i<numOutputElements; i++){
sum+=h_out[i];
}
freeHostArray(h_out);
freeDeviceArray(d_out);
return sum;
}
__global__ void deviceCopyMultipleAdjacencies(cuStinger* custing, cuStinger::cusVertexData* olddVD,
vertexId_t* requireUpdates, length_t requireCount ,length_t verticesPerThreadBlock)
{
// int32_t** d_cuadj = custing->d_adj;
// length_t* d_utilized = custing->getDeviceUsed();
length_t v_init=blockIdx.x*verticesPerThreadBlock;
for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){
if((v_init+v_hat)>=requireCount)
break;
vertexId_t v=requireUpdates[v_init+v_hat];
cuStinger::cusEdgeData *dED = custing->dVD->adj[v];
cuStinger::cusEdgeData *olddED = olddVD->adj[v];
//epv = edge per vertex
length_t epv = olddVD->getMax()[v];
int32_t pos=0;
dED->mem = custing->dVD->edMem[v];
dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv;
dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv;
dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv;
dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv;
__syncthreads();
for(length_t e=threadIdx.x; e<olddVD->getUsed()[v]; e+=blockDim.x){
dED->dst[e] = olddED->dst[e];
if(custing->isSemantic){
dED->ew[e] = olddED->ew[e];
}
else if(custing->useEWeight){
dED->ew[e] = olddED->ew[e];
dED->et[e] = olddED->et[e];
dED->t1[e] = olddED->t1[e];
dED->t2[e] = olddED->t1[e];
}
}
}
}
void cuStinger::copyMultipleAdjacencies(cusVertexData* olddVD,
vertexId_t* requireUpdates, length_t requireCount){
dim3 numBlocks(1, 1);
int32_t threads=32;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)requireCount);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
int32_t verticesPerThreadBlock;
if(numBlocks.x == requireCount)
verticesPerThreadBlock=1;
else
verticesPerThreadBlock = ceil(float(requireCount)/float(numBlocks.x-1));
deviceCopyMultipleAdjacencies<<<numBlocks,threadsPerBlock>>>(d_cuStinger,
olddVD, requireUpdates, requireCount, verticesPerThreadBlock);
checkLastCudaError("Error in the first update sweep");
}
__global__ void deviceCheckForDuplicateEdges(cuStinger* custing, length_t verticesPerThreadBlock)
{
vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x;
length_t nv = custing->getMaxNV();
__shared__ int dupFound;
for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){
vertexId_t v=v_init+v_hat;
if(v>=nv)
break;
length_t edges = custing->dVD->getUsed()[v];
cuStinger::cusEdgeData *dED = custing->dVD->adj[v];
// if(v ==45788 && threadIdx.x==0){
// for(length_t e=0; e<edges; e++)
// printf("%d ,",dED->dst[e]);
// printf("\n");
// }
for (length_t e=0; e<edges; e++){
vertexId_t currDest=dED->dst[e];
dupFound=-1;
__syncthreads();
for (length_t e2=0; e2<edges; e2+=blockDim.x){
vertexId_t currDest2 = dED->dst[e2];
if(currDest==currDest2 && e!=e2){
dupFound=e2;
}
}
__syncthreads();
if(dupFound!=-1)
printf("DUP FOUND IN CUSTIGER\n");
}
}
}
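// deviceCheckForDuplicateEdges is a debugging pass: for each vertex it
// rescans the used portion of dED->dst and prints a message when it finds
// the same destination id at two different slots of the adjacency list.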
void cuStinger::checkDuplicateEdges(){
dim3 numBlocks(1, 1);
int32_t threads=32;
dim3 threadsPerBlock(threads, 1);
int32_t verticesPerThreadBlock;
numBlocks.x = ceil((float)nv/(float)threads);
if (numBlocks.x>16000){
numBlocks.x=16000;
}
verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x));
// cout << "checkDuplicateEdges : " << verticesPerThreadBlock<< endl;
// cout << "checkDuplicateEdges : " << numBlocks.x << endl;
// cout << "Deletions : " << threadsPerBlock.x << endl;
deviceCheckForDuplicateEdges<<<numBlocks,threadsPerBlock>>>(d_cuStinger,
verticesPerThreadBlock);
checkLastCudaError("Error in the first update sweep");
}
|
e875f67c11126d17dc667433e42a9d3c835a3f7f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Find high divergence points of a vector field
// --- Input: 1. normalized 3D vector field
//
// dFx dFy dFz
// divergence = ----- + ----- + -----
// dx dy dz
//
// --- Output: highest ...% divergence point list
// --- Author: Nicu D. Cornea, Vizlab, Rutgers University
// --- Date: Wed Aug 20 17:53:56 EDT 2003
//
#include "HighDiverg.h"
// #define TRACE
#define SEARCH_GRID 1
#define CELL_SIZE 1.00 / SEARCH_GRID
#define MAX_NUM_HDPTS 5000
typedef struct Lock
{
int *mutex;
Lock()
{
int state = 0;
hipMalloc((void **)&mutex,sizeof(int));
hipMemset(mutex,0, sizeof(int));
}
~Lock()
{
hipFree(mutex);
}
__device__ void lock()
{
while(atomicCAS(mutex, 0, 1) != 0);
}
__device__ void unlock()
{
atomicExch(mutex, 0);
}
}Lock;
typedef struct {
int* Points;
int numPoints;
} HDGroup;
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts);
__host__ __device__ Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec);
__global__ void max_min_divergence(unsigned char *flags,Vector *ForceField,double *maxDiv,double *minDiv,bool inOut,int slsz,int L, int M, int N, double vdist,Lock* mylock)
{
int k=blockIdx.x;
int j=blockIdx.y;
int i=blockIdx.z;
double div;
int idx=k*slsz + j*L +i;
double x, y, z;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
for(int kk=0; kk < SEARCH_GRID; kk++) {
for(int jj=0; jj < SEARCH_GRID; jj++) {
for(int ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
				// interpolate force vectors around the point
Vector v_0 = interpolation(x + vdist, y, z, L, M, N, ForceField);
Vector v_1 = interpolation(x - vdist, y, z, L, M, N, ForceField);
Vector v_2 = interpolation(x, y + vdist, z, L, M, N, ForceField);
Vector v_3 = interpolation(x, y - vdist, z, L, M, N, ForceField);
Vector v_4 = interpolation(x, y, z + vdist, L, M, N, ForceField);
Vector v_5 = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v_0.xd - v_1.xd) + (v_2.yd - v_3.yd) + (v_4.zd - v_5.zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
mylock->lock();
if(div > *maxDiv)
{
*maxDiv = div;
}
if(div < *minDiv)
{
*minDiv = div;
				}
mylock->unlock();
}
}
}
}
// double GetDiv(double x, double y, double z);
bool GetHighDivergencePoints(
Vector* ForceField, // [in] vector field
int L, int M, int N, // [in] size of vector field (X, Y and Z)
unsigned char *flags, // [in] flags array
float perc, // [in] percentage of high div. points
// to be returned (top <perc> %)
VoxelPositionDouble **HDPts, // [out] high divergence point list
int *numHDPts, // [out] number of points in the list
bool inOut // [in] flag specifying if we should look
// outside the object too (if true).
// DEFAULT: false
) {
#ifdef TRACE
printf("TRACE: Starting GetHighDivergencePoints function. Cellsize = %lf\n", CELL_SIZE);
#endif
(*HDPts) = NULL;
(*numHDPts) = 0;
if(perc == 0) {
return true;
}
long idx, slsz;
int i,j,k, ii, jj, kk, s;
double x, y, z;
long cntz, cntnz;
slsz = L*M; // slice size
double adiv[MAX_NUM_HDPTS]; // divergence array
if(((*HDPts) = new VoxelPositionDouble[MAX_NUM_HDPTS]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
// calculate divergence throughout the dataset
double maxDiv = -999999.99;
double minDiv = 999999.99;
double div;
cntz = 0;
cntnz = 0;
double zerodiv = 0.1;
/////////////////////////////////////
Vector v[6];
double vdist = (CELL_SIZE) / 2.00;
#ifdef TRACE
printf("vdist = %lf\n", vdist);
#endif
printf("Finding high divergence points (1).\n");
unsigned char *d_flags;
Vector *d_ForceField;
double *d_maxDiv;
double *d_minDiv;
hipMalloc((void **)&d_flags,sizeof(unsigned char)*L*M*N);
hipMalloc((void **)&d_ForceField,sizeof(Vector)*L*M*N);
hipMalloc((void **)&d_minDiv,sizeof(double));
hipMalloc((void **)&d_maxDiv,sizeof(double));
hipMemcpy(d_flags,flags,sizeof(unsigned char)*L*M*N,hipMemcpyHostToDevice);
hipMemcpy(d_ForceField,ForceField,sizeof(Vector)*L*M*N,hipMemcpyHostToDevice);
hipMemcpy(d_maxDiv,&maxDiv,sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_minDiv,&minDiv,sizeof(double),hipMemcpyHostToDevice);
dim3 dimBlock(1);
dim3 dimGrid(N,M,L);
Lock *mylock=new Lock();
hipLaunchKernelGGL(( max_min_divergence), dim3(dimGrid),dim3(dimBlock), 0, 0, d_flags,d_ForceField,d_maxDiv,d_minDiv,inOut,slsz,L,M,N,vdist,mylock);
hipMemcpy(&maxDiv,d_maxDiv,sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(&minDiv,d_minDiv,sizeof(double),hipMemcpyDeviceToHost);
// for (k = 1; k < N-1; k++) {
// printf("\tProcessing plane %d out of %d\r", k, N-2);
// fflush(stdout);
// for (j = 1; j < M-1; j++) {
// for (i = 1; i < L-1; i++) {
// idx = k*slsz + j*L +i;
// // if we are not looking outside the object too.
// if(!inOut) {
// // - if this point is EXTERIOR, BOUNDARY or SURF, skip it
// if( (flags[idx] == EXTERIOR) ||
// (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// else {
// // we look for high divergence points outside the object too
// // ignore only boundary points.
// if( (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// for(kk=0; kk < SEARCH_GRID; kk++) {
// for(jj=0; jj < SEARCH_GRID; jj++) {
// for(ii=0; ii < SEARCH_GRID; ii++) {
// x = i + (ii * CELL_SIZE);
// y = j + (jj * CELL_SIZE);
// z = k + (kk * CELL_SIZE);
// #ifdef TRACE
// // printf("At point: (%lf, %lf, %lf)\n", x, y, z);
// #endif
// // interpolate force vectors arround the point
// v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
// v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
// v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
// v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
// v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
// v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
// div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
// if((div > -zerodiv) && (div < zerodiv)) {
// cntz++;
// }
// else {
// cntnz++;
// }
// #ifdef TRACE
// /*
// printf("Forces:\n");
// for(s = 0; s < 6; s++) {
// printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
// }
// printf("Div = %lf\n", div);
// */
// #endif
// if(div > maxDiv) {
// maxDiv = div;
// }
// if(div < minDiv) {
// minDiv = div;
// }div;
// }
// }
// }
// }
// }
// }
#ifdef _DEBUG
printf("Divergence: max = %lf, min = %lf\n", maxDiv, minDiv);
#endif
double threshold;
// case 1:
// take <perc> percent of the lowest negative value:
// !! have to change the comparison
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv + threshold;
/*
// case 2:
	// take <perc> percent of the highest positive value:
// !! have to change the comparison
// NOT GOOD
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = maxDiv - threshold;
*/
/*
// case 3:
// take <perc> percent of the lowest value (must be negative):
// !! have to change the comparison
// NOT GOOD
threshold = minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv - threshold;
*/
#ifdef _DEBUG
printf("Threshold set to: %lf\n", threshold);
printf("Number of close to 0 divergence points [-%lf..%lf]: %ld. \n \
Number of non 0 divergence points: %ld.\n",
zerodiv, zerodiv, cntz, cntnz);
#endif
printf("Finding high divergence points (2).\n");
for (k = 1; k < N-1; k++) {
printf("\tProcessing plane %d out of %d\r", k, N-2);
fflush(stdout);
for (j = 1; j < M-1; j++) {
for (i = 1; i < L-1; i++) {
idx = k*slsz + j*L +i;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
for(kk=0; kk < SEARCH_GRID; kk++) {
for(jj=0; jj < SEARCH_GRID; jj++) {
for(ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
					// interpolate force vectors around the point
v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
if(div <= threshold) {
// add the point to the HD list
(*HDPts)[(*numHDPts)].x = x;
(*HDPts)[(*numHDPts)].y = y;
(*HDPts)[(*numHDPts)].z = z;
adiv[(*numHDPts)] = div;
(*numHDPts) = (*numHDPts) + 1;
if((*numHDPts) >= MAX_NUM_HDPTS) {
printf("UPS! Too many high divergence points detected. \
Reached maximum of %d. Abort\n", MAX_NUM_HDPTS);
exit(1);
}
}
}
}
}
}
}
}
//
// sort the points on the divergence value;
//
double minval, tmp;
int minpos;
for(i=0; i < (*numHDPts); i++) {
minval = adiv[i];
minpos = i;
for(j=i+1; j < (*numHDPts); j++) {
if(adiv[j] < minval) {
minval = adiv[j];
minpos = j;
}
}
if(minpos != i) {
// exchange points and div values
tmp = adiv[i];
adiv[i] = adiv[minpos];
adiv[minpos] = tmp;
tmp = (*HDPts)[i].x; (*HDPts)[i].x = (*HDPts)[minpos].x; (*HDPts)[minpos].x = tmp;
tmp = (*HDPts)[i].y; (*HDPts)[i].y = (*HDPts)[minpos].y; (*HDPts)[minpos].y = tmp;
tmp = (*HDPts)[i].z; (*HDPts)[i].z = (*HDPts)[minpos].z; (*HDPts)[minpos].z = tmp;
}
}
#ifdef TRACE
printf("Points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
//
// cluster the points
//
// Algorithm:
// First point creates the first group.
// For all the other points:
// If the point is close to an existing group
// add the point to that group
// else
// the point starts a new group
// endif
// endfor
// end
//
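	// Illustrative example (not from the data): if the sorted points are
	// A, B, C and B lies within one voxel of A in x, y and z while C does
	// not, then A seeds group 0, B joins group 0, and C starts group 1;
	// only A and C (the first, most negative-divergence member of each
	// group) are returned as the final high-divergence points.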
// initialize data structure
HDGroup *Groups;
int numGroups = 0;
if((Groups = new HDGroup[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
for(i=0; i < (*numHDPts); i++) {
if((Groups[i].Points = new int[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
Groups[i].numPoints = 0;
}
bool closeToSomeGroup = false;
// first point creates the first group
Groups[0].Points[0] = 0;
Groups[0].numPoints = 1;
numGroups = 1;
for(i=1; i < (*numHDPts); i++) {
closeToSomeGroup = false;
for(j=0; j < numGroups; j++) {
if(PointIsCloseToGroup(i, j, Groups, HDPts)) {
// add the point to that group
Groups[j].Points[Groups[j].numPoints] = i;
Groups[j].numPoints = Groups[j].numPoints + 1;
closeToSomeGroup = true;
break;
}
}
if(!closeToSomeGroup) {
// start a new group
Groups[numGroups].Points[0] = i;
Groups[numGroups].numPoints = 1;
numGroups++;
}
}
#ifdef TRACE
// print the clustered points:
printf("Clustered points:\n");
for(i=0; i < numGroups; i++) {
printf("%f %f %f\n",
(*HDPts)[Groups[i].Points[0]].x, (*HDPts)[Groups[i].Points[0]].y, (*HDPts)[Groups[i].Points[0]].z);
for(j=1; j < Groups[i].numPoints; j++) {
printf("\t%f %f %f\n",
(*HDPts)[Groups[i].Points[j]].x, (*HDPts)[Groups[i].Points[j]].y, (*HDPts)[Groups[i].Points[j]].z);
}
}
#endif
//
// Return only the first point in each group as the high divergence points
//
VoxelPositionDouble* newHDPts;
if((newHDPts = new VoxelPositionDouble[numGroups]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
for(i=0; i < numGroups; i++) {
newHDPts[i].x = (*HDPts)[Groups[i].Points[0]].x;
newHDPts[i].y = (*HDPts)[Groups[i].Points[0]].y;
newHDPts[i].z = (*HDPts)[Groups[i].Points[0]].z;
}
// delete the old array
delete [] (*HDPts);
// delete Group data structure
for(i=0; i < numGroups; i++) {
delete [] Groups[i].Points;
}
delete [] Groups;
// return the new array
(*HDPts) = newHDPts;
(*numHDPts) = numGroups;
#ifdef TRACE
printf("Returning points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
return true;
}
__device__ __host__ inline Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec)
{
float alpha, beta, gamma;
Vector forceInt;
long slsz;
alpha=x-int(x);
beta=y-int(y);
gamma=z-int(z);
slsz=sizy*sizx;
forceInt.xd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*(alpha*beta*gamma);
forceInt.yd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*gamma;
forceInt.zd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*gamma;
return(forceInt);
}
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts) {
int i;
for(i=0; i < Groups[grp].numPoints; i++) {
if(
(fabs((*HDPts)[pt].x - (*HDPts)[Groups[grp].Points[i]].x) <= 1) &&
(fabs((*HDPts)[pt].y - (*HDPts)[Groups[grp].Points[i]].y) <= 1) &&
(fabs((*HDPts)[pt].z - (*HDPts)[Groups[grp].Points[i]].z) <= 1))
{
return true;
}
}
return false;
}
|
e875f67c11126d17dc667433e42a9d3c835a3f7f.cu
|
// Find high divergence points of a vector field
// --- Input: 1. normalized 3D vector field
//
// dFx dFy dFz
// divergence = ----- + ----- + -----
// dx dy dz
//
// --- Output: highest ...% divergence point list
// --- Author: Nicu D. Cornea, Vizlab, Rutgers University
// --- Date: Wed Aug 20 17:53:56 EDT 2003
//
#include "HighDiverg.h"
// #define TRACE
#define SEARCH_GRID 1
#define CELL_SIZE 1.00 / SEARCH_GRID
#define MAX_NUM_HDPTS 5000
typedef struct Lock
{
int *mutex;
Lock()
{
int state = 0;
cudaMalloc((void **)&mutex,sizeof(int));
cudaMemset(mutex,0, sizeof(int));
}
~Lock()
{
cudaFree(mutex);
}
__device__ void lock()
{
while(atomicCAS(mutex, 0, 1) != 0);
}
__device__ void unlock()
{
atomicExch(mutex, 0);
}
}Lock;
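// Lock is a device-side spinlock: lock() spins until atomicCAS flips
// *mutex from 0 to 1 and unlock() releases it with atomicExch. It is used
// by max_min_divergence below to serialize updates of the global *maxDiv
// and *minDiv, since every block writes to the same two doubles.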
typedef struct {
int* Points;
int numPoints;
} HDGroup;
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts);
__host__ __device__ Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec);
__global__ void max_min_divergence(unsigned char *flags,Vector *ForceField,double *maxDiv,double *minDiv,bool inOut,int slsz,int L, int M, int N, double vdist,Lock* mylock)
{
int k=blockIdx.x;
int j=blockIdx.y;
int i=blockIdx.z;
double div;
int idx=k*slsz + j*L +i;
double x, y, z;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
return;
}
}
for(int kk=0; kk < SEARCH_GRID; kk++) {
for(int jj=0; jj < SEARCH_GRID; jj++) {
for(int ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
				// interpolate force vectors around the point
Vector v_0 = interpolation(x + vdist, y, z, L, M, N, ForceField);
Vector v_1 = interpolation(x - vdist, y, z, L, M, N, ForceField);
Vector v_2 = interpolation(x, y + vdist, z, L, M, N, ForceField);
Vector v_3 = interpolation(x, y - vdist, z, L, M, N, ForceField);
Vector v_4 = interpolation(x, y, z + vdist, L, M, N, ForceField);
Vector v_5 = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v_0.xd - v_1.xd) + (v_2.yd - v_3.yd) + (v_4.zd - v_5.zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
mylock->lock();
if(div > *maxDiv)
{
*maxDiv = div;
}
if(div < *minDiv)
{
*minDiv = div;
				}
mylock->unlock();
}
}
}
}
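// max_min_divergence is launched with one single-thread block per voxel
// (dimGrid = (N, M, L), dimBlock = 1); each block evaluates the central
// difference
//   div = (Fx(x+h,y,z) - Fx(x-h,y,z)
//        + Fy(x,y+h,z) - Fy(x,y-h,z)
//        + Fz(x,y,z+h) - Fz(x,y,z-h)) / (2*h),  with h = vdist,
// and folds it into the global *maxDiv / *minDiv under the spinlock.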
// double GetDiv(double x, double y, double z);
bool GetHighDivergencePoints(
Vector* ForceField, // [in] vector field
int L, int M, int N, // [in] size of vector field (X, Y and Z)
unsigned char *flags, // [in] flags array
float perc, // [in] percentage of high div. points
// to be returned (top <perc> %)
VoxelPositionDouble **HDPts, // [out] high divergence point list
int *numHDPts, // [out] number of points in the list
bool inOut // [in] flag specifying if we should look
// outside the object too (if true).
// DEFAULT: false
) {
#ifdef TRACE
printf("TRACE: Starting GetHighDivergencePoints function. Cellsize = %lf\n", CELL_SIZE);
#endif
(*HDPts) = NULL;
(*numHDPts) = 0;
if(perc == 0) {
return true;
}
long idx, slsz;
int i,j,k, ii, jj, kk, s;
double x, y, z;
long cntz, cntnz;
slsz = L*M; // slice size
double adiv[MAX_NUM_HDPTS]; // divergence array
if(((*HDPts) = new VoxelPositionDouble[MAX_NUM_HDPTS]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
// calculate divergence throughout the dataset
double maxDiv = -999999.99;
double minDiv = 999999.99;
double div;
cntz = 0;
cntnz = 0;
double zerodiv = 0.1;
/////////////////////////////////////
Vector v[6];
double vdist = (CELL_SIZE) / 2.00;
#ifdef TRACE
printf("vdist = %lf\n", vdist);
#endif
printf("Finding high divergence points (1).\n");
unsigned char *d_flags;
Vector *d_ForceField;
double *d_maxDiv;
double *d_minDiv;
cudaMalloc((void **)&d_flags,sizeof(unsigned char)*L*M*N);
cudaMalloc((void **)&d_ForceField,sizeof(Vector)*L*M*N);
cudaMalloc((void **)&d_minDiv,sizeof(double));
cudaMalloc((void **)&d_maxDiv,sizeof(double));
cudaMemcpy(d_flags,flags,sizeof(unsigned char)*L*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_ForceField,ForceField,sizeof(Vector)*L*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_maxDiv,&maxDiv,sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_minDiv,&minDiv,sizeof(double),cudaMemcpyHostToDevice);
dim3 dimBlock(1);
dim3 dimGrid(N,M,L);
	// NOTE: the kernel dereferences this pointer on the device; that is only valid if
	// Lock's state is device-accessible (e.g. allocated with managed or device memory).
	Lock *mylock=new Lock();
	max_min_divergence<<<dimGrid,dimBlock>>>(d_flags,d_ForceField,d_maxDiv,d_minDiv,inOut,slsz,L,M,N,vdist,mylock);
	cudaMemcpy(&maxDiv,d_maxDiv,sizeof(double),cudaMemcpyDeviceToHost);
	cudaMemcpy(&minDiv,d_minDiv,sizeof(double),cudaMemcpyDeviceToHost);
	// the device buffers and the lock are not needed beyond this reduction; release them
	cudaFree(d_flags); cudaFree(d_ForceField); cudaFree(d_maxDiv); cudaFree(d_minDiv);
	delete mylock;
// for (k = 1; k < N-1; k++) {
// printf("\tProcessing plane %d out of %d\r", k, N-2);
// fflush(stdout);
// for (j = 1; j < M-1; j++) {
// for (i = 1; i < L-1; i++) {
// idx = k*slsz + j*L +i;
// // if we are not looking outside the object too.
// if(!inOut) {
// // - if this point is EXTERIOR, BOUNDARY or SURF, skip it
// if( (flags[idx] == EXTERIOR) ||
// (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// else {
// // we look for high divergence points outside the object too
// // ignore only boundary points.
// if( (flags[idx] == BOUNDARY) ||
// (flags[idx] == SURF))
// {
// continue;
// }
// }
// for(kk=0; kk < SEARCH_GRID; kk++) {
// for(jj=0; jj < SEARCH_GRID; jj++) {
// for(ii=0; ii < SEARCH_GRID; ii++) {
// x = i + (ii * CELL_SIZE);
// y = j + (jj * CELL_SIZE);
// z = k + (kk * CELL_SIZE);
// #ifdef TRACE
// // printf("At point: (%lf, %lf, %lf)\n", x, y, z);
// #endif
// // interpolate force vectors arround the point
// v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
// v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
// v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
// v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
// v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
// v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
// div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
// if((div > -zerodiv) && (div < zerodiv)) {
// cntz++;
// }
// else {
// cntnz++;
// }
// #ifdef TRACE
// /*
// printf("Forces:\n");
// for(s = 0; s < 6; s++) {
// printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
// }
// printf("Div = %lf\n", div);
// */
// #endif
// if(div > maxDiv) {
// maxDiv = div;
// }
// if(div < minDiv) {
// minDiv = div;
// }div;
// }
// }
// }
// }
// }
// }
#ifdef _DEBUG
printf("Divergence: max = %lf, min = %lf\n", maxDiv, minDiv);
#endif
double threshold;
// case 1:
// take <perc> percent of the lowest negative value:
// !! have to change the comparison
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv + threshold;
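	// Worked example (added): with minDiv = -4.0, maxDiv = 2.0 and perc = 10, the range is
	// 6.0, so threshold = -4.0 + 0.6 = -3.4 and only voxels with div <= -3.4 are kept below.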
/*
// case 2:
// take <perc> percent of the highest pozitive value:
// !! have to change the comparison
// NOT GOOD
threshold = maxDiv - minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = maxDiv - threshold;
*/
/*
// case 3:
// take <perc> percent of the lowest value (must be negative):
// !! have to change the comparison
// NOT GOOD
threshold = minDiv;
threshold = ((double)perc / 100.00) * threshold;
threshold = minDiv - threshold;
*/
#ifdef _DEBUG
printf("Threshold set to: %lf\n", threshold);
printf("Number of close to 0 divergence points [-%lf..%lf]: %ld. \n \
Number of non 0 divergence points: %ld.\n",
zerodiv, zerodiv, cntz, cntnz);
#endif
printf("Finding high divergence points (2).\n");
for (k = 1; k < N-1; k++) {
printf("\tProcessing plane %d out of %d\r", k, N-2);
fflush(stdout);
for (j = 1; j < M-1; j++) {
for (i = 1; i < L-1; i++) {
idx = k*slsz + j*L +i;
if(!inOut) {
// - if this point is EXTERIOR, BOUNDARY or SURF, skip it
if( (flags[idx] == EXTERIOR) ||
(flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
else {
// we look for high divergence points outside the object too
// ignore only boundary points.
if( (flags[idx] == BOUNDARY) ||
(flags[idx] == SURF))
{
continue;
}
}
for(kk=0; kk < SEARCH_GRID; kk++) {
for(jj=0; jj < SEARCH_GRID; jj++) {
for(ii=0; ii < SEARCH_GRID; ii++) {
x = i + (ii * CELL_SIZE);
y = j + (jj * CELL_SIZE);
z = k + (kk * CELL_SIZE);
#ifdef TRACE
// printf("At point: (%lf, %lf, %lf)\n", x, y, z);
#endif
// interpolate force vectors arround the point
v[0] = interpolation(x + vdist, y, z, L, M, N, ForceField);
v[1] = interpolation(x - vdist, y, z, L, M, N, ForceField);
v[2] = interpolation(x, y + vdist, z, L, M, N, ForceField);
v[3] = interpolation(x, y - vdist, z, L, M, N, ForceField);
v[4] = interpolation(x, y, z + vdist, L, M, N, ForceField);
v[5] = interpolation(x, y, z - vdist, L, M, N, ForceField);
div = ((v[0].xd - v[1].xd) + (v[2].yd - v[3].yd) + (v[4].zd - v[5].zd)) / (2 * vdist);
#ifdef TRACE
/*
printf("Forces:\n");
for(s = 0; s < 6; s++) {
printf("%lf %lf %lf\n", v[s].xd, v[s].yd, v[s].zd);
}
printf("Div = %lf\n", div);
*/
#endif
if(div <= threshold) {
// add the point to the HD list
(*HDPts)[(*numHDPts)].x = x;
(*HDPts)[(*numHDPts)].y = y;
(*HDPts)[(*numHDPts)].z = z;
adiv[(*numHDPts)] = div;
(*numHDPts) = (*numHDPts) + 1;
if((*numHDPts) >= MAX_NUM_HDPTS) {
printf("UPS! Too many high divergence points detected. \
Reached maximum of %d. Abort\n", MAX_NUM_HDPTS);
exit(1);
}
}
}
}
}
}
}
}
//
// sort the points on the divergence value;
//
double minval, tmp;
int minpos;
for(i=0; i < (*numHDPts); i++) {
minval = adiv[i];
minpos = i;
for(j=i+1; j < (*numHDPts); j++) {
if(adiv[j] < minval) {
minval = adiv[j];
minpos = j;
}
}
if(minpos != i) {
// exchange points and div values
tmp = adiv[i];
adiv[i] = adiv[minpos];
adiv[minpos] = tmp;
tmp = (*HDPts)[i].x; (*HDPts)[i].x = (*HDPts)[minpos].x; (*HDPts)[minpos].x = tmp;
tmp = (*HDPts)[i].y; (*HDPts)[i].y = (*HDPts)[minpos].y; (*HDPts)[minpos].y = tmp;
tmp = (*HDPts)[i].z; (*HDPts)[i].z = (*HDPts)[minpos].z; (*HDPts)[minpos].z = tmp;
}
}
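	// Alternative sketch (added, not in the original): the selection sort above is O(n^2)
	// in the number of candidate points. If <algorithm> and <vector> are acceptable here,
	// an index sort keeps the points and their divergence values paired:
	//
	//   std::vector<int> order(*numHDPts);
	//   for (int t = 0; t < *numHDPts; t++) order[t] = t;
	//   std::sort(order.begin(), order.end(),
	//             [&](int a, int b) { return adiv[a] < adiv[b]; });
	//   // then permute adiv and (*HDPts) according to 'order'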
#ifdef TRACE
printf("Points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
//
// cluster the points
//
// Algorithm:
// First point creates the first group.
// For all the other points:
// If the point is close to an existing group
// add the point to that group
// else
// the point starts a new group
// endif
// endfor
// end
//
// initialize data structure
HDGroup *Groups;
int numGroups = 0;
if((Groups = new HDGroup[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
for(i=0; i < (*numHDPts); i++) {
if((Groups[i].Points = new int[(*numHDPts)]) == NULL) {
printf("Error allocating memory for working data structures. Abort\n");
exit(1);
}
Groups[i].numPoints = 0;
}
bool closeToSomeGroup = false;
// first point creates the first group
Groups[0].Points[0] = 0;
Groups[0].numPoints = 1;
numGroups = 1;
for(i=1; i < (*numHDPts); i++) {
closeToSomeGroup = false;
for(j=0; j < numGroups; j++) {
if(PointIsCloseToGroup(i, j, Groups, HDPts)) {
// add the point to that group
Groups[j].Points[Groups[j].numPoints] = i;
Groups[j].numPoints = Groups[j].numPoints + 1;
closeToSomeGroup = true;
break;
}
}
if(!closeToSomeGroup) {
// start a new group
Groups[numGroups].Points[0] = i;
Groups[numGroups].numPoints = 1;
numGroups++;
}
}
#ifdef TRACE
// print the clustered points:
printf("Clustered points:\n");
for(i=0; i < numGroups; i++) {
printf("%f %f %f\n",
(*HDPts)[Groups[i].Points[0]].x, (*HDPts)[Groups[i].Points[0]].y, (*HDPts)[Groups[i].Points[0]].z);
for(j=1; j < Groups[i].numPoints; j++) {
printf("\t%f %f %f\n",
(*HDPts)[Groups[i].Points[j]].x, (*HDPts)[Groups[i].Points[j]].y, (*HDPts)[Groups[i].Points[j]].z);
}
}
#endif
//
// Return only the first point in each group as the high divergence points
//
VoxelPositionDouble* newHDPts;
if((newHDPts = new VoxelPositionDouble[numGroups]) == NULL) {
printf("GetHighDivergencePoints: UPS! - Error allocating memory for the output array. Abort.\n");
exit(1);
}
for(i=0; i < numGroups; i++) {
newHDPts[i].x = (*HDPts)[Groups[i].Points[0]].x;
newHDPts[i].y = (*HDPts)[Groups[i].Points[0]].y;
newHDPts[i].z = (*HDPts)[Groups[i].Points[0]].z;
}
// delete the old array
delete [] (*HDPts);
// delete Group data structure
for(i=0; i < numGroups; i++) {
delete [] Groups[i].Points;
}
delete [] Groups;
// return the new array
(*HDPts) = newHDPts;
(*numHDPts) = numGroups;
#ifdef TRACE
printf("Returning points: \n");
for(i=0; i < (*numHDPts); i++) {
printf("%f %f %f - %f\n", (*HDPts)[i].x, (*HDPts)[i].y, (*HDPts)[i].z, adiv[i]);
}
#endif
return true;
}
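// Usage sketch (added; 'seeds' and 'numSeeds' are illustrative names): collect the top 10%
// most negative-divergence points inside the object only:
//
//   VoxelPositionDouble *seeds = NULL;
//   int numSeeds = 0;
//   GetHighDivergencePoints(ForceField, L, M, N, flags, 10.0f, &seeds, &numSeeds, false);
//   // ... use seeds[0..numSeeds-1], then delete [] seeds;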
__device__ __host__ inline Vector interpolation(double x, double y, double z, int sizx, int sizy, int sizz, Vector *forcevec)
{
float alpha, beta, gamma;
Vector forceInt;
long slsz;
alpha=x-int(x);
beta=y-int(y);
gamma=z-int(z);
slsz=sizy*sizx;
forceInt.xd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].xd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].xd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].xd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].xd*(alpha*beta*gamma);
forceInt.yd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].yd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].yd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].yd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].yd*alpha*beta*gamma;
forceInt.zd=forcevec[int(z)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + int(x)].zd*(1-alpha)*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*(1-gamma)
+forcevec[int(z)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*(1-gamma)
+forcevec[(int(z)+1)*slsz + int(y)*sizx + (int(x)+1)].zd*alpha*(1-beta)*gamma
+forcevec[int(z)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*(1-gamma)
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + int(x)].zd*(1-alpha)*beta*gamma
+forcevec[(int(z)+1)*slsz + (int(y)+1)*sizx + (int(x)+1)].zd*alpha*beta*gamma;
return(forceInt);
}
inline bool PointIsCloseToGroup(int pt, int grp, HDGroup *Groups, VoxelPositionDouble **HDPts) {
int i;
for(i=0; i < Groups[grp].numPoints; i++) {
if(
(fabs((*HDPts)[pt].x - (*HDPts)[Groups[grp].Points[i]].x) <= 1) &&
(fabs((*HDPts)[pt].y - (*HDPts)[Groups[grp].Points[i]].y) <= 1) &&
(fabs((*HDPts)[pt].z - (*HDPts)[Groups[grp].Points[i]].z) <= 1))
{
return true;
}
}
return false;
}
|
471574b0175a7c73948754ea09d7775d7b348055.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "canny.h"
// see note about constant memory: https://stackoverflow.com/a/2452813/2397327
__constant__ float dFlt[1024];
__host__ void setFilter(float *flt, unsigned size)
{
CUDAERR(hipMemcpyToSymbol(dFlt, flt, size),
"copying flt to dFlt");
}
/**
 * performs a 2D convolution of a single-channel image d1 with a single-channel
* filter filt, resulting in an image with the same dimensions as and centered
* around d1
*/
__global__ void conv2d(byte *d1, byte *d3,
int h1, int w1, int h2, int w2)
{
int y, x, i, j, imin, imax, jmin, jmax, ip, jp;
float sum;
// infer y, x, from block/thread index
y = blockDim.y * blockIdx.y + threadIdx.y;
x = blockDim.x * blockIdx.x + threadIdx.x;
// out of bounds, no work to do
if (x >= w1 || y >= h1) {
return;
}
// appropriate ranges for convolution
imin = max(0, y+h2/2-h2+1);
imax = min(h1, y+h2/2+1);
jmin = max(0, x+w2/2-w2+1);
jmax = min(w1, x+w2/2+1);
// convolution
sum = 0;
for (i = imin; i < imax; ++i) {
for (j = jmin; j < jmax; ++j) {
ip = i - h2/2;
jp = j - w2/2;
sum += d1[i*w1 + j] * dFlt[(y-ip)*w2 + (x-jp)];
}
}
// set result
d3[y*w1 + x] = sum;
}
// 1D convolution along x-direction with shared memory: loads in an apron
// assumed that block size in x direction is lbs (long blocksize = 64),
// and block size in y direction is sbs (short blocksize = 16) for performance;
// also best to have filt be in constant memory for performance purposes
__global__ void conv1dRows(byte *dIn, byte *dOut,
int h, int w, int fltSize)
{
int y, x, as, i, j;
float sum;
__shared__ byte tmp[lbs*sbs];
as = fltSize>>1; // apron size
// infer y, x, from block/thread index
// note extra operations based on apron for x
y = sbs * blockIdx.y + ty;
x = (lbs-(as<<1)) * blockIdx.x + tx-as;
// load data
if (y<h && x>=0 && x<w) {
tmp[ty*lbs+tx] = dIn[y*w+x];
}
__syncthreads();
// perform 1-D convolution
if (tx>=as && tx<lbs-as && y<h && x<w) {
for (i = ty*lbs+tx-as, j = 0, sum = 0; j < fltSize; ++i, ++j) {
sum += dFlt[j] * tmp[i];
}
// set result
dOut[y*w+x] = sum;
}
}
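// Added sketch (not part of the original file): one possible host-side launch configuration
// for conv1dRows, assuming lbs == 64 and sbs == 16 as described above. Each block carries an
// apron of fltSize/2 pixels on both sides, so it only produces lbs - 2*(fltSize/2) output
// columns, and the grid is sized on that effective width. Names here are illustrative.
static void launchConv1dRowsSketch(byte *dIn, byte *dOut, int h, int w, int fltSize)
{
	int as = fltSize >> 1;                 // apron size, same as inside the kernel
	int effW = lbs - 2*as;                 // output columns actually produced per block
	dim3 block(lbs, sbs);
	dim3 grid((w + effW - 1) / effW, (h + sbs - 1) / sbs);
	hipLaunchKernelGGL(conv1dRows, grid, block, 0, 0, dIn, dOut, h, w, fltSize);
}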
// same as above but with columns; assumes that blocksize is 64 in the column
// direction and blocksize is 16 in the row dimension
__global__ void conv1dCols(byte *dIn, byte *dOut,
int h, int w, int fltSize)
{
int y, x, as, i, j;
float sum;
__shared__ byte tmp[lbs*sbs];
as = fltSize>>1; // apron size
// infer y, x, from block/thread index
// note extra operations based on apron for x
y = (lbs-(as<<1)) * blockIdx.y + ty-as;
x = sbs * blockIdx.x + tx;
// load data
if (y>=0 && y<h && x<w) {
tmp[ty*sbs+tx] = dIn[y*w+x];
}
__syncthreads();
// perform 1-D convolution
if (ty>=as && ty<lbs-as && y<h && x<w) {
for (i = (ty-as)*sbs+tx, j = 0, sum = 0; j < fltSize;
i+=sbs, ++j) {
sum += dFlt[j] * tmp[i];
}
// set result
dOut[y*w+x] = sum;
}
}
|
471574b0175a7c73948754ea09d7775d7b348055.cu
|
#include "canny.h"
// see note about constant memory: https://stackoverflow.com/a/2452813/2397327
__constant__ float dFlt[1024];
__host__ void setFilter(float *flt, unsigned size)
{
CUDAERR(cudaMemcpyToSymbol(dFlt, flt, size),
"copying flt to dFlt");
}
/**
 * performs a 2D convolution of a single-channel image d1 with a single-channel
* filter filt, resulting in an image with the same dimensions as and centered
* around d1
*/
__global__ void conv2d(byte *d1, byte *d3,
int h1, int w1, int h2, int w2)
{
int y, x, i, j, imin, imax, jmin, jmax, ip, jp;
float sum;
// infer y, x, from block/thread index
y = blockDim.y * blockIdx.y + threadIdx.y;
x = blockDim.x * blockIdx.x + threadIdx.x;
// out of bounds, no work to do
if (x >= w1 || y >= h1) {
return;
}
// appropriate ranges for convolution
imin = max(0, y+h2/2-h2+1);
imax = min(h1, y+h2/2+1);
jmin = max(0, x+w2/2-w2+1);
jmax = min(w1, x+w2/2+1);
// convolution
sum = 0;
for (i = imin; i < imax; ++i) {
for (j = jmin; j < jmax; ++j) {
ip = i - h2/2;
jp = j - w2/2;
sum += d1[i*w1 + j] * dFlt[(y-ip)*w2 + (x-jp)];
}
}
// set result
d3[y*w1 + x] = sum;
}
// 1D convolution along x-direction with shared memory: loads in an apron
// assumed that block size in x direction is lbs (long blocksize = 64),
// and block size in y direction is sbs (short blocksize = 16) for performance;
// also best to have filt be in constant memory for performance purposes
__global__ void conv1dRows(byte *dIn, byte *dOut,
int h, int w, int fltSize)
{
int y, x, as, i, j;
float sum;
__shared__ byte tmp[lbs*sbs];
as = fltSize>>1; // apron size
// infer y, x, from block/thread index
// note extra operations based on apron for x
y = sbs * blockIdx.y + ty;
x = (lbs-(as<<1)) * blockIdx.x + tx-as;
// load data
if (y<h && x>=0 && x<w) {
tmp[ty*lbs+tx] = dIn[y*w+x];
}
__syncthreads();
// perform 1-D convolution
if (tx>=as && tx<lbs-as && y<h && x<w) {
for (i = ty*lbs+tx-as, j = 0, sum = 0; j < fltSize; ++i, ++j) {
sum += dFlt[j] * tmp[i];
}
// set result
dOut[y*w+x] = sum;
}
}
// same as above but with columns; assumes that blocksize is 64 in the column
// direction and blocksize is 16 in the row dimension
__global__ void conv1dCols(byte *dIn, byte *dOut,
int h, int w, int fltSize)
{
int y, x, as, i, j;
float sum;
__shared__ byte tmp[lbs*sbs];
as = fltSize>>1; // apron size
// infer y, x, from block/thread index
// note extra operations based on apron for x
y = (lbs-(as<<1)) * blockIdx.y + ty-as;
x = sbs * blockIdx.x + tx;
// load data
if (y>=0 && y<h && x<w) {
tmp[ty*sbs+tx] = dIn[y*w+x];
}
__syncthreads();
// perform 1-D convolution
if (ty>=as && ty<lbs-as && y<h && x<w) {
for (i = (ty-as)*sbs+tx, j = 0, sum = 0; j < fltSize;
i+=sbs, ++j) {
sum += dFlt[j] * tmp[i];
}
// set result
dOut[y*w+x] = sum;
}
}
|
a232f35646bd3f652cb55640c99f1205e49bd437.hip
|
// !!! This is a file automatically generated by hipify!!!
/***********************************************************
*
* Developed for Seminar in Parallelisation of Physics
* Calculations on GPUs with CUDA, Department of Physics
* Technical University of Munich.
*
* Author: Binu Amaratunga
*
*
***********************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <hip/hip_complex.h>
#include "controls.h"
#include "utils.h"
/*************************************
* Compute 2D FFT with cuFFT
* output
*
*
*
**************************************/
void fft2(hipDoubleComplex * inData, const unsigned int N) {
hipfftDoubleComplex *d_inData = NULL;
gpuErrChk(hipMalloc(&d_inData, N * N * sizeof(hipfftDoubleComplex)));
gpuErrChk(hipMemcpy(d_inData, inData, N * N * sizeof(hipfftDoubleComplex), hipMemcpyHostToDevice));
hipfftHandle plan;
hipfftResult flag;
flag = hipfftPlan2d(&plan, N, N, HIPFFT_Z2Z);
if ( HIPFFT_SUCCESS != flag ) printf("2D: hipfftPlan2d fails!\n");
flag = hipfftExecZ2Z(plan, d_inData, d_inData, HIPFFT_FORWARD);
if ( HIPFFT_SUCCESS != flag ) printf("2D: hipfftExecR2C fails!\n");
gpuErrChk(hipDeviceSynchronize());
gpuErrChk(hipMemcpy(inData, d_inData, N * N * sizeof(hipfftDoubleComplex), hipMemcpyDeviceToHost) );
flag = hipfftDestroy(plan);
if ( HIPFFT_SUCCESS != flag ) printf("2D: hipfftDestroy fails!\n");
gpuErrChk(hipFree(d_inData));
}
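/*************************************
 * Added sketch (not part of the original file): the squared magnitude is computed on the
 * host in main(); a minimal device-side alternative, assuming the spectrum is still
 * resident on the GPU as hipDoubleComplex (.x = real part, .y = imaginary part).
 * The kernel name is illustrative only.
 **************************************/
__global__ void magnitudeSquaredKernel(const hipDoubleComplex *in, double *out, unsigned int n)
{
	unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < n) {
		out[idx] = in[idx].x * in[idx].x + in[idx].y * in[idx].y;
	}
}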
int main(int argc, char** argv){
if(argc < 2) {
printf("Enter the dimension size as argument!\n");
exit(EXIT_FAILURE);
}
int N = atoi(argv[1]);
// Complex data input
hipDoubleComplex * inputData = (hipDoubleComplex *)malloc(N * N * sizeof(hipDoubleComplex));
// Real data
double * outputData = (double *)malloc(N * N * sizeof(double));
int slit_height = 1;
int slit_width = 2;
int slit_dist = 7;
// TODO: Create this data on the device itself
// Create double slit
for (int j = 0; j < N; j++){
for (int i = 0; i < N; i++){
inputData[j * N + i] = make_cuDoubleComplex(0.0, 0.0);
// Set slit positions to 1
if ((abs(i-N/2) <= slit_dist+slit_width) && (abs(i-N/2) >= slit_dist) && (abs(j-N/2) <= slit_height)){
inputData[j * N + i] = make_cuDoubleComplex(1.0, 0.0);
} // printf("%0.0lf ", reInput[j * N + i]);
} // printf("\n");
}
printf("Running fft for %d x %d = %d = 2 ^ %d data points...\n", N, N, N*N, (int)(log(N*N)/log(2)));
clock_t start, end;
double cpu_time_used;
start = clock();
fft2(inputData, N);
// TODO: Do this in cuBLAS
for(int i = 0; i < N*N; i++){
outputData[i] = cuCreal(inputData[i]) * cuCreal(inputData[i])
+ cuCimag(inputData[i]) * cuCimag(inputData[i]);
}
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Runtime = %lfs\n", cpu_time_used);
printf("Writing output data...\n");
writeCSV(outputData, 0, N);
free(inputData);
free(outputData);
return 0;
}
|
a232f35646bd3f652cb55640c99f1205e49bd437.cu
|
/***********************************************************
*
* Developed for Seminar in Parallelisation of Physics
* Calculations on GPUs with CUDA, Department of Physics
* Technical University of Munich.
*
* Author: Binu Amaratunga
*
*
***********************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <cufft.h>
#include <cufftXt.h>
#include <cuComplex.h>
#include "controls.h"
#include "utils.h"
/*************************************
* Compute 2D FFT with cuFFT
* output
*
*
*
**************************************/
void fft2(cuDoubleComplex * inData, const unsigned int N) {
cufftDoubleComplex *d_inData = NULL;
gpuErrChk(cudaMalloc(&d_inData, N * N * sizeof(cufftDoubleComplex)));
gpuErrChk(cudaMemcpy(d_inData, inData, N * N * sizeof(cufftDoubleComplex), cudaMemcpyHostToDevice));
cufftHandle plan;
cufftResult flag;
flag = cufftPlan2d(&plan, N, N, CUFFT_Z2Z);
if ( CUFFT_SUCCESS != flag ) printf("2D: cufftPlan2d fails!\n");
flag = cufftExecZ2Z(plan, d_inData, d_inData, CUFFT_FORWARD);
if ( CUFFT_SUCCESS != flag ) printf("2D: cufftExecR2C fails!\n");
gpuErrChk(cudaDeviceSynchronize());
gpuErrChk(cudaMemcpy(inData, d_inData, N * N * sizeof(cufftDoubleComplex), cudaMemcpyDeviceToHost) );
flag = cufftDestroy(plan);
if ( CUFFT_SUCCESS != flag ) printf("2D: cufftDestroy fails!\n");
gpuErrChk(cudaFree(d_inData));
}
int main(int argc, char** argv){
if(argc < 2) {
printf("Enter the dimension size as argument!\n");
exit(EXIT_FAILURE);
}
int N = atoi(argv[1]);
// Complex data input
cuDoubleComplex * inputData = (cuDoubleComplex *)malloc(N * N * sizeof(cuDoubleComplex));
// Real data
double * outputData = (double *)malloc(N * N * sizeof(double));
int slit_height = 1;
int slit_width = 2;
int slit_dist = 7;
// TODO: Create this data on the device itself
// Create double slit
for (int j = 0; j < N; j++){
for (int i = 0; i < N; i++){
inputData[j * N + i] = make_cuDoubleComplex(0.0, 0.0);
// Set slit positions to 1
if ((abs(i-N/2) <= slit_dist+slit_width) && (abs(i-N/2) >= slit_dist) && (abs(j-N/2) <= slit_height)){
inputData[j * N + i] = make_cuDoubleComplex(1.0, 0.0);
} // printf("%0.0lf ", reInput[j * N + i]);
} // printf("\n");
}
printf("Running fft for %d x %d = %d = 2 ^ %d data points...\n", N, N, N*N, (int)(log(N*N)/log(2)));
clock_t start, end;
double cpu_time_used;
start = clock();
fft2(inputData, N);
// TODO: Do this in cuBLAS
for(int i = 0; i < N*N; i++){
outputData[i] = cuCreal(inputData[i]) * cuCreal(inputData[i])
+ cuCimag(inputData[i]) * cuCimag(inputData[i]);
}
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Runtime = %lfs\n", cpu_time_used);
printf("Writing output data...\n");
writeCSV(outputData, 0, N);
free(inputData);
free(outputData);
return 0;
}
|
9e8a079995e465cd3cd50b33da6bf04a93969a31.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
dsymv_upper.cu is nearly identical to dsymv_upper.cu, just change names and drop .
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from zhemv_mgpu_upper.cu normal z -> d, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
dsymv_kernel_U_mgpu(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( (partial && tx >= partial) ||
(blk == 0 && tx < block_offset) ) {
sx_blk[tx] = MAGMA_D_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1;
A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=next; jj < gridDim.x; jj += ngpu) {
partial = (jj == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// block is right of diagonal, so don't need to worry about offset here
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_D_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; //MAGMA_D_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U_mgpu
/**************************************************************
Upper case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ]
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
Note beta*y is not included here; see magmablas_dsymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ x x * ] blk=2 blanks are not set
[ * ] blk=3
[ x x x x * ] blk=4
[ * ] blk=0
work[gpu=1] = [ x * ] blk=1
[ * ] blk=2
[ x x x * ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries right of the diagonal blocks are not accessed.
There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
[ * ]
y[gpu=0] = [ * ]
[ x + x + * ]
[ * ]
[ x + x + x + x + * ]
[ * ]
y[gpu=1] = [ x + * ]
[ * ]
[ x + x + x + * ]
[ * ]
********************************************************************/
__global__ void
dsymv_kernel_U_mgpu_sum(
int n,
double alpha,
int lda,
double * __restrict__ y, int incy,
double const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
double Ax = MAGMA_D_ZERO;
work += ind;
// if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
// else only block j=blk contains data.
int first = 0;
if ( blk % ngpu != my_gpu_id ) {
first = blk;
}
for (int j = first; j <= blk; ++j) {
Ax += work[j*lda];
}
y[ind * incy] = alpha*Ax; // see magmablas_dsymv_sync for beta*y
}
}
// end dsymv_kernel_U_mgpu_sum
|
9e8a079995e465cd3cd50b33da6bf04a93969a31.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
dsymv_upper.cu is nearly identical to dsymv_upper.cu, just change names and drop .
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from zhemv_mgpu_upper.cu normal z -> d, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
dsymv_kernel_U_mgpu(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( (partial && tx >= partial) ||
(blk == 0 && tx < block_offset) ) {
sx_blk[tx] = MAGMA_D_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1;
A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=next; jj < gridDim.x; jj += ngpu) {
partial = (jj == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// block is right of diagonal, so don't need to worry about offset here
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_D_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; //MAGMA_D_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U_mgpu
/**************************************************************
Upper case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ]
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
Note beta*y is not included here; see magmablas_dsymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ x x * ] blk=2 blanks are not set
[ * ] blk=3
[ x x x x * ] blk=4
[ * ] blk=0
work[gpu=1] = [ x * ] blk=1
[ * ] blk=2
[ x x x * ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries right of the diagonal blocks are not accessed.
There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
[ * ]
y[gpu=0] = [ * ]
[ x + x + * ]
[ * ]
[ x + x + x + x + * ]
[ * ]
y[gpu=1] = [ x + * ]
[ * ]
[ x + x + x + * ]
[ * ]
********************************************************************/
__global__ void
dsymv_kernel_U_mgpu_sum(
int n,
double alpha,
int lda,
double * __restrict__ y, int incy,
double const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
double Ax = MAGMA_D_ZERO;
work += ind;
// if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
// else only block j=blk contains data.
int first = 0;
if ( blk % ngpu != my_gpu_id ) {
first = blk;
}
for (int j = first; j <= blk; ++j) {
Ax += work[j*lda];
}
y[ind * incy] = alpha*Ax; // see magmablas_dsymv_sync for beta*y
}
}
// end dsymv_kernel_U_mgpu_sum
|
99349d35919066c081d8cc4c64f24d9125aa0557.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* \file not_op.cu
* \desc The not operator
*/
#include "blaze/operator/op/not_op.h"
namespace blaze {
template <typename DType>
__global__ void NotKernel(NotParam<DType> params) {
CUDA_KERNEL_LOOP(index, params.size) {
params.y[index] = params.x[index] == 0 ? 1 : 0;
}
}
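// Note (added): RunOnDevice below packs the kernel argument into a NotParam struct and
// launches via cudaLaunchKernel with a single pointer-to-argument; with the same grid,
// block and stream this is equivalent to the chevron form, e.g.
//   NotKernel<DType><<<grid, block, 0, stream>>>(params);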
template <>
bool NotOp<CUDAContext>::RunOnDevice() {
Blob* X = this->Input(0);
Blob* Y = this->Output(0);
TYPE_SWITCH_ON_CUDA(X->data_type(), DType, {
// Reshape
Y->Reshape(X->shape());
// lauch the kernel
dim3 grid, block;
block.x = GetThreadsNum(X->size());
grid.x = CUDA_GET_BLOCKS(X->size(), block.x);
hipStream_t stream = this->context_.cuda_stream();
NotParam<DType> params(X->as<DType>(), X->size(), Y->as<DType>());
void* params_dptr = reinterpret_cast<void*>(¶ms);
CUDA_CHECK(cudaLaunchKernel(reinterpret_cast<void*>(&NotKernel<DType>),
grid,
block,
reinterpret_cast<void**>(¶ms_dptr),
0,
stream));
}); // TYPE_SWITCH(Y->data_type(), DType, {
return true;
}
REGISTER_CUDA_OPERATOR(Not, NotOp<CUDAContext>);
} // namespace blaze
|
99349d35919066c081d8cc4c64f24d9125aa0557.cu
|
/*
* \file not_op.cu
* \desc The not operator
*/
#include "blaze/operator/op/not_op.h"
namespace blaze {
template <typename DType>
__global__ void NotKernel(NotParam<DType> params) {
CUDA_KERNEL_LOOP(index, params.size) {
params.y[index] = params.x[index] == 0 ? 1 : 0;
}
}
template <>
bool NotOp<CUDAContext>::RunOnDevice() {
Blob* X = this->Input(0);
Blob* Y = this->Output(0);
TYPE_SWITCH_ON_CUDA(X->data_type(), DType, {
// Reshape
Y->Reshape(X->shape());
// lauch the kernel
dim3 grid, block;
block.x = GetThreadsNum(X->size());
grid.x = CUDA_GET_BLOCKS(X->size(), block.x);
cudaStream_t stream = this->context_.cuda_stream();
NotParam<DType> params(X->as<DType>(), X->size(), Y->as<DType>());
void* params_dptr = reinterpret_cast<void*>(¶ms);
CUDA_CHECK(cudaLaunchKernel(reinterpret_cast<void*>(&NotKernel<DType>),
grid,
block,
reinterpret_cast<void**>(¶ms_dptr),
0,
stream));
}); // TYPE_SWITCH(Y->data_type(), DType, {
return true;
}
REGISTER_CUDA_OPERATOR(Not, NotOp<CUDAContext>);
} // namespace blaze
|
5a42e0aa44873596f9057d7a426f0df7ac185aac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/MultiLabelMarginCriterion.hip"
#else
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, input);
if(input->dim() == 1)
{
int dim = input->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3,
"inconsistent target size");
THCTensor_(resize1d)(state, output, 1);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == Reduction::Mean
);
// THCudaCheck(hipGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != Reduction::None)
{
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize1d)(state, output, 1);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == Reduction::Mean
);
// THCudaCheck(hipGetLastError());
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp)));
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
// THCudaCheck(hipGetLastError());
}
}
else
AT_ERROR("non-empty vector or matrix expected, got size: ", input->sizes());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(gradInput->dim() == 1)
{
int dim = gradInput->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3,
"inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 1) && (istarget->size(0) == dim), 3,
"inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, gradInput->size(0),
reduction == Reduction::Mean,
reduction != Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == Reduction::Mean,
reduction != Reduction::None);
}
else
AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes());
// THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
|
5a42e0aa44873596f9057d7a426f0df7ac185aac.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/MultiLabelMarginCriterion.cu"
#else
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, input);
if(input->dim() == 1)
{
int dim = input->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3,
"inconsistent target size");
THCTensor_(resize1d)(state, output, 1);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == Reduction::Mean
);
// THCudaCheck(cudaGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != Reduction::None)
{
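// Reduced case: each block writes one per-sample loss into output_tmp, and the
// scalar output is obtained by summing output_tmp with sumall. The trailing
// boolean tells the kernel whether to average for Reduction::Mean.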
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize1d)(state, output, 1);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == Reduction::Mean
);
// THCudaCheck(cudaGetLastError());
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp)));
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
// THCudaCheck(cudaGetLastError());
}
}
else
AT_ERROR("non-empty vector or matrix expected, got size: ", input->sizes());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(gradInput->dim() == 1)
{
int dim = gradInput->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3,
"inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 1) && (istarget->size(0) == dim), 3,
"inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, gradInput->size(0),
reduction == Reduction::Mean,
reduction != Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == Reduction::Mean,
reduction != Reduction::None);
}
else
AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes());
// THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
|
60a41386b8453fbd3623b5e8936dc08a8352fd07.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <chrono>
namespace caffe {
template<typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
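// Shapes: M_ is the batch size, K_ the input dimension, N_ the number of
// outputs, and the weight blob is an N_ x K_ matrix. A single sample
// (M_ == 1) uses a GEMV; otherwise top = bottom * W^T via GEMM, with the bias
// added as the rank-1 product bias_multiplier * b.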
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight,
bottom_data, (Dtype) 0., top_data);
if (bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
this->blobs_[1]->gpu_data(), top_data);
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1.,
bottom_data, weight, (Dtype) 0., top_data);
if (bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1.,
bias_multiplier_.gpu_data(),
this->blobs_[1]->gpu_data(), (Dtype) 1.,
top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
if (M_ == 1) {
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, N_,
K_, (Dtype) 1., (cl_mem) weight, 0,
(cl_mem) bottom_data, 0, (Dtype) 0.,
(cl_mem) top_data, 0);
if (bias_term_)
greentea_gpu_axpy<Dtype>(this->device_->id(), N_,
bias_multiplier_.cpu_data()[0],
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
(cl_mem) top_data, 0);
} else {
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
CblasTrans, M_, N_, K_, (Dtype) 1.,
(cl_mem) bottom_data, 0, (cl_mem) weight, 0,
(Dtype) 0., (cl_mem) top_data, 0);
if (bias_term_)
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
CblasNoTrans, M_, N_, 1, (Dtype) 1.,
(cl_mem) (bias_multiplier_.gpu_data()), 0,
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
(Dtype) 1., (cl_mem) top_data, 0);
}
this->device_->FinishQueues();
end = std::chrono::system_clock::now();
std::chrono::duration<double, std::milli> fp_ms = end - start;
VLOG(1) << "fc " << fp_ms.count();
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
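// Backward shapes: dW += top_diff^T * bottom (N_ x K_), db += top_diff^T * 1
// (length N_), and dbottom = top_diff * W (M_ x K_); each corresponds to one
// GEMM/GEMV call below.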
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1.,
top_diff, bottom_data, (Dtype) 1.,
this->blobs_[0]->mutable_gpu_diff());
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff,
bias_multiplier_.gpu_data(), (Dtype) 1.,
this->blobs_[1]->mutable_gpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1.,
top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0.,
bottom[0]->mutable_gpu_diff());
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans,
CblasNoTrans, N_, K_, M_, (Dtype) 1.,
(cl_mem) top_diff, 0, (cl_mem) bottom_data, 0,
(Dtype) 1.,
(cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
0);
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, M_, N_,
(Dtype) 1., (cl_mem) top_diff, 0,
(cl_mem) (bias_multiplier_.gpu_data()), 0,
(Dtype) 1.,
(cl_mem) (this->blobs_[1]->mutable_gpu_diff()),
0);
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
CblasNoTrans, M_, K_, N_, (Dtype) 1.,
(cl_mem) top_diff, 0,
(cl_mem) (this->blobs_[0]->gpu_data()), 0,
(Dtype) 0.,
(cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe
|
60a41386b8453fbd3623b5e8936dc08a8352fd07.cu
|
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <chrono>
namespace caffe {
template<typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
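// Shapes: M_ is the batch size, K_ the input dimension, N_ the number of
// outputs, and the weight blob is an N_ x K_ matrix. A single sample
// (M_ == 1) uses a GEMV; otherwise top = bottom * W^T via GEMM, with the bias
// added as the rank-1 product bias_multiplier * b.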
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight,
bottom_data, (Dtype) 0., top_data);
if (bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
this->blobs_[1]->gpu_data(), top_data);
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1.,
bottom_data, weight, (Dtype) 0., top_data);
if (bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1.,
bias_multiplier_.gpu_data(),
this->blobs_[1]->gpu_data(), (Dtype) 1.,
top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
if (M_ == 1) {
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, N_,
K_, (Dtype) 1., (cl_mem) weight, 0,
(cl_mem) bottom_data, 0, (Dtype) 0.,
(cl_mem) top_data, 0);
if (bias_term_)
greentea_gpu_axpy<Dtype>(this->device_->id(), N_,
bias_multiplier_.cpu_data()[0],
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
(cl_mem) top_data, 0);
} else {
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
CblasTrans, M_, N_, K_, (Dtype) 1.,
(cl_mem) bottom_data, 0, (cl_mem) weight, 0,
(Dtype) 0., (cl_mem) top_data, 0);
if (bias_term_)
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
CblasNoTrans, M_, N_, 1, (Dtype) 1.,
(cl_mem) (bias_multiplier_.gpu_data()), 0,
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
(Dtype) 1., (cl_mem) top_data, 0);
}
this->device_->FinishQueues();
end = std::chrono::system_clock::now();
std::chrono::duration<double, std::milli> fp_ms = end - start;
VLOG(1) << "fc " << fp_ms.count();
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
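// Backward shapes: dW += top_diff^T * bottom (N_ x K_), db += top_diff^T * 1
// (length N_), and dbottom = top_diff * W (M_ x K_); each corresponds to one
// GEMM/GEMV call below.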
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1.,
top_diff, bottom_data, (Dtype) 1.,
this->blobs_[0]->mutable_gpu_diff());
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff,
bias_multiplier_.gpu_data(), (Dtype) 1.,
this->blobs_[1]->mutable_gpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1.,
top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0.,
bottom[0]->mutable_gpu_diff());
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans,
CblasNoTrans, N_, K_, M_, (Dtype) 1.,
(cl_mem) top_diff, 0, (cl_mem) bottom_data, 0,
(Dtype) 1.,
(cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
0);
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, M_, N_,
(Dtype) 1., (cl_mem) top_diff, 0,
(cl_mem) (bias_multiplier_.gpu_data()), 0,
(Dtype) 1.,
(cl_mem) (this->blobs_[1]->mutable_gpu_diff()),
0);
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
CblasNoTrans, M_, K_, N_, (Dtype) 1.,
(cl_mem) top_diff, 0,
(cl_mem) (this->blobs_[0]->gpu_data()), 0,
(Dtype) 0.,
(cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe
|
4e5bf7cc8f68205cde7de8a432394ce5a62ead45.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* subHessianCalculater.cu
*
* Created on: 10 Jan 2017
* Author: Zeyi Wen
*/
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <hip/hip_runtime_api.h>
#include <sys/time.h>
#include "subHessianCalculator.h"
#include "../constant.h"
#include "../../SharedUtility/KeyValue.h"
#include "../../SharedUtility/Timer.h"
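// RBFKernel converts a dense matrix of dot products into RBF kernel values in
// place, using the identity ||x_i - x_j||^2 = x_i.x_i + x_j.x_j - 2 * x_i.x_j:
// K[i][j] = exp(-gamma * (selfDot0[i] + selfDot1[j] - 2 * dotProduct[i * m + j])).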
__global__ void RBFKernel(const float_point *selfDot0, const float_point *selfDot1,
float_point *dotProduct, int n, int m,
float gamma) {
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = idx / m;
int j = idx % m;
if (idx < n * m) {
dotProduct[idx] = expf(-(selfDot0[i] + selfDot1[j] - dotProduct[idx] * 2) * gamma);
}
}
/**
* @brief: create handle and descr for CSR matrix operations
*/
void SubHessianCalculator::prepareCSRContext(hipsparseHandle_t &handle, hipsparseMatDescr_t &descr){
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
}
/**
* @brief: release handle and descr
*/
void SubHessianCalculator::releaseCSRContext(hipsparseHandle_t &handle, hipsparseMatDescr_t &descr){
hipsparseDestroy(handle);
hipsparseDestroyMatDescr(descr);
}
/**
* @brief: compute a sub/whole kernel matrix
* @param: n is the number of rows of matrix0; m is the number of rows of matrix1; k is the dimension.
*/
void SubHessianCalculator::computeSubHessianMatrix(hipsparseHandle_t handle, hipsparseMatDescr_t descr,
CSRMatrix &csrMatrix0, int n, CSRMatrix &csrMatrix1, int m, int k,
float_point *devC, const SVMParam &param){
float_point *devVal0;
int *devRowPtr0, *devColInd0;
csrMatrix0.copy2Dev(devVal0, devRowPtr0, devColInd0);
float_point *devSelfDot0;
int nnz0 = csrMatrix0.getNnz();
checkCudaErrors(hipMalloc((void **) &devSelfDot0, sizeof(float_point) * n));
checkCudaErrors(hipMemcpy(devSelfDot0, csrMatrix0.getCSRValSelfDot(), sizeof(float_point) * n, hipMemcpyHostToDevice));
//initialize parameters of matrix1
int nnz1 = nnz0;
float_point *devVal1 = devVal0;
int *devRowPtr1 = devRowPtr0, *devColInd1 = devColInd0;
float_point *devSelfDot1 = devSelfDot0;
if(&csrMatrix1 != &csrMatrix0){//compare two addresses
csrMatrix1.copy2Dev(devVal1, devRowPtr1, devColInd1);
nnz1 = csrMatrix1.getNnz();
checkCudaErrors(hipMalloc((void **) &devSelfDot1, sizeof(float_point) * m));
checkCudaErrors(hipMemcpy(devSelfDot1, csrMatrix1.getCSRValSelfDot(), sizeof(float_point) * m, hipMemcpyHostToDevice));
}
CSRMatrix::CSRmm2Dense(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, n, m, k, descr,
nnz0, devVal0, devRowPtr0, devColInd0, descr, nnz1, devVal1, devRowPtr1, devColInd1, devC);
RBFKernel << < Ceil(n * m, BLOCK_SIZE), BLOCK_SIZE >> > (devSelfDot0, devSelfDot1, devC, n, m, param.gamma);
checkCudaErrors(hipFree(devSelfDot0));
csrMatrix0.freeDev(devVal0, devRowPtr0, devColInd0);
if(&csrMatrix1 != &csrMatrix0){
checkCudaErrors(hipFree(devSelfDot1));
csrMatrix1.freeDev(devVal1, devRowPtr1, devColInd1);
}
}
void SubHessianCalculator::preComputeSharedCache(vector<float_point*> &hostSharedCache, const SvmProblem &problem,
const SVMParam &param) {
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
prepareCSRContext(handle, descr);
for (int i = 0; i < problem.getNumOfClasses(); ++i) {
printf("pre-compute shared cache %d\n", i);
vector<vector<KeyValue> > oneClass = problem.getOneClassSamples(i);
int n = oneClass.size();
int k = problem.getNumOfFeatures();
CSRMatrix csrMatrix(oneClass, k);
float_point *devC;
checkCudaErrors(hipMalloc((void **) &devC, sizeof(float_point) * n * n));//this can be moved out of for-loop by reusing the memory.
computeSubHessianMatrix(handle, descr, csrMatrix, n, csrMatrix, n, k, devC, param);
checkCudaErrors(hipMemcpy(hostSharedCache[i], devC, sizeof(float_point) * n * n, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(devC));
}
releaseCSRContext(handle, descr);
}
void SubHessianCalculator::preComputeUniqueCache(int i, int j, const SvmProblem &subProblem,
vector<float_point*> &devUniqueCache, vector<size_t> &sizeOfEachRowInUniqueCache,
vector<int> &numOfElementEachRowInUniqueCache, const SVMParam &param) {
printf("pre-compute unique cache....");
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
prepareCSRContext(handle, descr);
int n = subProblem.count[0];
int m = subProblem.count[1];
int k = subProblem.getNumOfFeatures();
vector<vector<KeyValue> > samples0(subProblem.v_vSamples.begin(), subProblem.v_vSamples.begin() + n);
vector<vector<KeyValue> > samples1(subProblem.v_vSamples.begin() + n, subProblem.v_vSamples.begin() + n + m);
CSRMatrix csrMatrix0(samples0, k);
CSRMatrix csrMatrix1(samples1, k);
float_point *devC;
checkCudaErrors(hipMalloc((void **) &devC, sizeof(float_point) * n * m));
computeSubHessianMatrix(handle, descr, csrMatrix0, n, csrMatrix1, m, k, devC, param);
checkCudaErrors(hipMemcpy2D(devUniqueCache[0], sizeOfEachRowInUniqueCache[0], devC,
m * sizeof(float_point), m * sizeof(float_point), n, hipMemcpyDeviceToDevice));
//compute another sub kernel matrix by transposition
float const alpha(1.0);
float const beta(0.0);
hipblasHandle_t handle2;
hipblasCreate(&handle2);
hipblasSgeam(handle2, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, &alpha, devC, m, &beta, devC, n, devUniqueCache[1],
numOfElementEachRowInUniqueCache[1]);
hipblasDestroy(handle2);
checkCudaErrors(hipFree(devC));
releaseCSRContext(handle, descr);
printf("done\n");
}
void SubHessianCalculator::preComputeAndStoreInHost(float_point *hostHessianMatrix, const SvmProblem &problem,
bool &preComputeInHost, const SVMParam &param) {
printf("pre-compute in host\n");
preComputeInHost = true;
timeval start, end;
gettimeofday(&start,NULL);
vector<vector<KeyValue> > permutedSamples;
for (int i = 0; i < problem.v_vSamples.size(); ++i) {
permutedSamples.push_back(problem.v_vSamples[problem.perm[i]]);
}
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
prepareCSRContext(handle, descr);
int m = problem.getNumOfSamples();
int k = problem.getNumOfFeatures();
int n = m / 100;
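// The m x m kernel (Hessian) matrix is computed in chunks of n = m / 100 rows
// to bound device memory, and each chunk is copied back into
// hostHessianMatrix; this assumes m >= 100 so that n > 0.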
float_point *devValA, *devValB, *devSelfDot;
int *devRowPtrA, *devColIndA, *devRowPtrB, *devColIndB;
float_point *devC;
CSRMatrix all(permutedSamples, k);
int nnzA = all.getNnz();
all.copy2Dev(devValA, devRowPtrA, devColIndA);
checkCudaErrors(hipMalloc((void **) &devSelfDot, sizeof(float_point) * m));
checkCudaErrors(hipMemcpy(devSelfDot, all.getCSRValSelfDot(), sizeof(float_point) * m, hipMemcpyHostToDevice));
printf("n = %d\n", n);
float totalTime = 0;
for (int i = 0; i < m / n + 1; ++i) {
CSRMatrix sub(
vector<vector<KeyValue> >(permutedSamples.begin() + n * i, permutedSamples.begin() + (n * (i + 1)>m?m:(n*(i+1)))),
k);
int tn = sub.getNumOfSamples();
int nnzB = sub.getNnz();
sub.copy2Dev(devValB, devRowPtrB, devColIndB);
checkCudaErrors(hipMalloc((void **) &devC, sizeof(float_point) * tn * m));
CSRMatrix::CSRmm2Dense(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, tn, m, k,
descr, nnzB, devValB, devRowPtrB, devColIndB, descr, nnzA, devValA, devRowPtrA,
devColIndA, devC);
RBFKernel << < Ceil(tn * m, BLOCK_SIZE), BLOCK_SIZE >> >
(devSelfDot + n * i, devSelfDot, devC, tn, m, param.gamma);
sub.freeDev(devValB, devRowPtrB, devColIndB);
checkCudaErrors(
hipMemcpy(hostHessianMatrix + n * m * i, devC, sizeof(float_point) * tn * m, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(devC));
}
checkCudaErrors(hipFree(devSelfDot));
releaseCSRContext(handle, descr);
gettimeofday(&end,NULL);
printf("time elapsed for pre-compute hessian matrix in host: %f\n", timeElapse(start,end));
}
void SubHessianCalculator::preComputeCache4BinaryProblem(float_point *devC, const SvmProblem &problem,
const SVMParam &param) {
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
prepareCSRContext(handle, descr);
CSRMatrix csrMatrix(problem.v_vSamples, problem.getNumOfFeatures());
int n = problem.getNumOfSamples();
int k = problem.getNumOfFeatures();
computeSubHessianMatrix(handle, descr, csrMatrix, n, csrMatrix, n, k, devC, param);
releaseCSRContext(handle, descr);
}
|
4e5bf7cc8f68205cde7de8a432394ce5a62ead45.cu
|
/*
* subHessianCalculater.cu
*
* Created on: 10 Jan 2017
* Author: Zeyi Wen
*/
#include <cuda.h>
#include <helper_cuda.h>
#include <cuda_runtime_api.h>
#include <sys/time.h>
#include "subHessianCalculator.h"
#include "../constant.h"
#include "../../SharedUtility/KeyValue.h"
#include "../../SharedUtility/Timer.h"
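// RBFKernel converts a dense matrix of dot products into RBF kernel values in
// place, using the identity ||x_i - x_j||^2 = x_i.x_i + x_j.x_j - 2 * x_i.x_j:
// K[i][j] = exp(-gamma * (selfDot0[i] + selfDot1[j] - 2 * dotProduct[i * m + j])).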
__global__ void RBFKernel(const float_point *selfDot0, const float_point *selfDot1,
float_point *dotProduct, int n, int m,
float gamma) {
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = idx / m;
int j = idx % m;
if (idx < n * m) {
dotProduct[idx] = expf(-(selfDot0[i] + selfDot1[j] - dotProduct[idx] * 2) * gamma);
}
}
/**
* @brief: create handle and descr for CSR matrix operations
*/
void SubHessianCalculator::prepareCSRContext(cusparseHandle_t &handle, cusparseMatDescr_t &descr){
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
}
/**
* @brief: release handle and descr
*/
void SubHessianCalculator::releaseCSRContext(cusparseHandle_t &handle, cusparseMatDescr_t &descr){
cusparseDestroy(handle);
cusparseDestroyMatDescr(descr);
}
/**
* @brief: compute a sub/whole kernel matrix
* @param: n is the number of rows of matrix0; m is the number of rows of matrix1; k is the dimension.
*/
void SubHessianCalculator::computeSubHessianMatrix(cusparseHandle_t handle, cusparseMatDescr_t descr,
CSRMatrix &csrMatrix0, int n, CSRMatrix &csrMatrix1, int m, int k,
float_point *devC, const SVMParam &param){
float_point *devVal0;
int *devRowPtr0, *devColInd0;
csrMatrix0.copy2Dev(devVal0, devRowPtr0, devColInd0);
float_point *devSelfDot0;
int nnz0 = csrMatrix0.getNnz();
checkCudaErrors(cudaMalloc((void **) &devSelfDot0, sizeof(float_point) * n));
checkCudaErrors(cudaMemcpy(devSelfDot0, csrMatrix0.getCSRValSelfDot(), sizeof(float_point) * n, cudaMemcpyHostToDevice));
//initialize parameters of matrix1
int nnz1 = nnz0;
float_point *devVal1 = devVal0;
int *devRowPtr1 = devRowPtr0, *devColInd1 = devColInd0;
float_point *devSelfDot1 = devSelfDot0;
if(&csrMatrix1 != &csrMatrix0){//compare two addresses
csrMatrix1.copy2Dev(devVal1, devRowPtr1, devColInd1);
nnz1 = csrMatrix1.getNnz();
checkCudaErrors(cudaMalloc((void **) &devSelfDot1, sizeof(float_point) * m));
checkCudaErrors(cudaMemcpy(devSelfDot1, csrMatrix1.getCSRValSelfDot(), sizeof(float_point) * m, cudaMemcpyHostToDevice));
}
CSRMatrix::CSRmm2Dense(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, n, m, k, descr,
nnz0, devVal0, devRowPtr0, devColInd0, descr, nnz1, devVal1, devRowPtr1, devColInd1, devC);
RBFKernel << < Ceil(n * m, BLOCK_SIZE), BLOCK_SIZE >> > (devSelfDot0, devSelfDot1, devC, n, m, param.gamma);
checkCudaErrors(cudaFree(devSelfDot0));
csrMatrix0.freeDev(devVal0, devRowPtr0, devColInd0);
if(&csrMatrix1 != &csrMatrix0){
checkCudaErrors(cudaFree(devSelfDot1));
csrMatrix1.freeDev(devVal1, devRowPtr1, devColInd1);
}
}
void SubHessianCalculator::preComputeSharedCache(vector<float_point*> &hostSharedCache, const SvmProblem &problem,
const SVMParam &param) {
cusparseHandle_t handle;
cusparseMatDescr_t descr;
prepareCSRContext(handle, descr);
for (int i = 0; i < problem.getNumOfClasses(); ++i) {
printf("pre-compute shared cache %d\n", i);
vector<vector<KeyValue> > oneClass = problem.getOneClassSamples(i);
int n = oneClass.size();
int k = problem.getNumOfFeatures();
CSRMatrix csrMatrix(oneClass, k);
float_point *devC;
checkCudaErrors(cudaMalloc((void **) &devC, sizeof(float_point) * n * n));//this can be moved out of for-loop by reusing the memory.
computeSubHessianMatrix(handle, descr, csrMatrix, n, csrMatrix, n, k, devC, param);
checkCudaErrors(cudaMemcpy(hostSharedCache[i], devC, sizeof(float_point) * n * n, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(devC));
}
releaseCSRContext(handle, descr);
}
void SubHessianCalculator::preComputeUniqueCache(int i, int j, const SvmProblem &subProblem,
vector<float_point*> &devUniqueCache, vector<size_t> &sizeOfEachRowInUniqueCache,
vector<int> &numOfElementEachRowInUniqueCache, const SVMParam &param) {
printf("pre-compute unique cache....");
cusparseHandle_t handle;
cusparseMatDescr_t descr;
prepareCSRContext(handle, descr);
int n = subProblem.count[0];
int m = subProblem.count[1];
int k = subProblem.getNumOfFeatures();
vector<vector<KeyValue> > samples0(subProblem.v_vSamples.begin(), subProblem.v_vSamples.begin() + n);
vector<vector<KeyValue> > samples1(subProblem.v_vSamples.begin() + n, subProblem.v_vSamples.begin() + n + m);
CSRMatrix csrMatrix0(samples0, k);
CSRMatrix csrMatrix1(samples1, k);
float_point *devC;
checkCudaErrors(cudaMalloc((void **) &devC, sizeof(float_point) * n * m));
computeSubHessianMatrix(handle, descr, csrMatrix0, n, csrMatrix1, m, k, devC, param);
checkCudaErrors(cudaMemcpy2D(devUniqueCache[0], sizeOfEachRowInUniqueCache[0], devC,
m * sizeof(float_point), m * sizeof(float_point), n, cudaMemcpyDeviceToDevice));
//compute another sub kernel matrix by transposition
float const alpha(1.0);
float const beta(0.0);
cublasHandle_t handle2;
cublasCreate(&handle2);
cublasSgeam(handle2, CUBLAS_OP_T, CUBLAS_OP_N, n, m, &alpha, devC, m, &beta, devC, n, devUniqueCache[1],
numOfElementEachRowInUniqueCache[1]);
cublasDestroy(handle2);
checkCudaErrors(cudaFree(devC));
releaseCSRContext(handle, descr);
printf("done\n");
}
void SubHessianCalculator::preComputeAndStoreInHost(float_point *hostHessianMatrix, const SvmProblem &problem,
bool &preComputeInHost, const SVMParam &param) {
printf("pre-compute in host\n");
preComputeInHost = true;
timeval start, end;
gettimeofday(&start,NULL);
vector<vector<KeyValue> > permutedSamples;
for (int i = 0; i < problem.v_vSamples.size(); ++i) {
permutedSamples.push_back(problem.v_vSamples[problem.perm[i]]);
}
cusparseHandle_t handle;
cusparseMatDescr_t descr;
prepareCSRContext(handle, descr);
int m = problem.getNumOfSamples();
int k = problem.getNumOfFeatures();
int n = m / 100;
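// The m x m kernel (Hessian) matrix is computed in chunks of n = m / 100 rows
// to bound device memory, and each chunk is copied back into
// hostHessianMatrix; this assumes m >= 100 so that n > 0.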
float_point *devValA, *devValB, *devSelfDot;
int *devRowPtrA, *devColIndA, *devRowPtrB, *devColIndB;
float_point *devC;
CSRMatrix all(permutedSamples, k);
int nnzA = all.getNnz();
all.copy2Dev(devValA, devRowPtrA, devColIndA);
checkCudaErrors(cudaMalloc((void **) &devSelfDot, sizeof(float_point) * m));
checkCudaErrors(cudaMemcpy(devSelfDot, all.getCSRValSelfDot(), sizeof(float_point) * m, cudaMemcpyHostToDevice));
printf("n = %d\n", n);
float totalTime = 0;
for (int i = 0; i < m / n + 1; ++i) {
CSRMatrix sub(
vector<vector<KeyValue> >(permutedSamples.begin() + n * i, permutedSamples.begin() + (n * (i + 1)>m?m:(n*(i+1)))),
k);
int tn = sub.getNumOfSamples();
int nnzB = sub.getNnz();
sub.copy2Dev(devValB, devRowPtrB, devColIndB);
checkCudaErrors(cudaMalloc((void **) &devC, sizeof(float_point) * tn * m));
CSRMatrix::CSRmm2Dense(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, tn, m, k,
descr, nnzB, devValB, devRowPtrB, devColIndB, descr, nnzA, devValA, devRowPtrA,
devColIndA, devC);
RBFKernel << < Ceil(tn * m, BLOCK_SIZE), BLOCK_SIZE >> >
(devSelfDot + n * i, devSelfDot, devC, tn, m, param.gamma);
sub.freeDev(devValB, devRowPtrB, devColIndB);
checkCudaErrors(
cudaMemcpy(hostHessianMatrix + n * m * i, devC, sizeof(float_point) * tn * m, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(devC));
}
checkCudaErrors(cudaFree(devSelfDot));
releaseCSRContext(handle, descr);
gettimeofday(&end,NULL);
printf("time elapsed for pre-compute hessian matrix in host: %f\n", timeElapse(start,end));
}
void SubHessianCalculator::preComputeCache4BinaryProblem(float_point *devC, const SvmProblem &problem,
const SVMParam &param) {
cusparseHandle_t handle;
cusparseMatDescr_t descr;
prepareCSRContext(handle, descr);
CSRMatrix csrMatrix(problem.v_vSamples, problem.getNumOfFeatures());
int n = problem.getNumOfSamples();
int k = problem.getNumOfFeatures();
computeSubHessianMatrix(handle, descr, csrMatrix, n, csrMatrix, n, k, devC, param);
releaseCSRContext(handle, descr);
}
|
2138a7d050b29c453d6d4f1d484691291953c844.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Two kernels, no shared memory, manual laplacian, 1D malloc */
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__constant__ float fd_d[3];
// Device code
__global__ void step_d(const float *const model,
float *wfc,
float *wfp,
const int nb, const int nz, const int nx)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int z = blockDim.y * blockIdx.y + threadIdx.y;
int b = blockDim.z * blockIdx.z + threadIdx.z;
int i = z * nx + x;
int ib = b * nz * nx + i;
float lap;
bool in_domain = (x > 1) && (x < nx - 2)
&& (z > 1) && (z < nz - 2)
&& (b < nb);
if (in_domain)
{
/* Laplacian */
lap = (fd_d[0] * wfc[ib] +
fd_d[1] *
(wfc[ib + 1] +
wfc[ib - 1] +
wfc[ib + nx] +
wfc[ib - nx]) +
fd_d[2] *
(wfc[ib + 2] +
wfc[ib - 2] +
wfc[ib + 2 * nx] +
wfc[ib - 2 * nx]));
/* Main evolution equation */
wfp[ib] = model[i] * lap + 2 * wfc[ib] - wfp[ib];
}
}
__global__ void add_sources_d(const float *const model,
float *wfp,
const float *const source_amplitude,
const int *const sources_z,
const int *const sources_x,
const int nz, const int nx,
const int nt, const int ns, const int it)
{
int x = threadIdx.x;
int b = blockIdx.x;
int i = sources_z[b * ns + x] * nx + sources_x[b * ns + x];
int ib = b * nz * nx + i;
wfp[ib] += source_amplitude[b * ns * nt + x * nt + it] * model[i];
}
// Host code
extern "C"
void setup(int nb, int nz, int nx, float dx, float *model_h,
float **model_d, float **wfc_d, float **wfp_d)
{
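/* fd holds the 4th-order central-difference weights for the 2D Laplacian,
already scaled by 1/dx^2: the centre term -10/2 is 2 * (-5/2) (one -5/2 per
axis), while 4/3 and -1/12 weight the +-1 and +-2 neighbours on each axis. */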
float fd[3] = {
-10.0f / 2 / (dx * dx),
4.0f / 3 / (dx * dx),
-1.0f / 12 / (dx * dx)
};
gpuErrchk(hipMemcpyToSymbol(fd_d, fd, 3*sizeof(float)));
int nmodel = nz * nx;
int nwf = nb * nmodel;
size_t nmodelbytes = nmodel * sizeof(float);
size_t nwfbytes = nwf * sizeof(float);
gpuErrchk(hipMalloc(model_d, nmodelbytes));
gpuErrchk(hipMemcpy(*model_d, model_h, nmodelbytes,
hipMemcpyHostToDevice));
gpuErrchk(hipMalloc(wfc_d, nwfbytes));
gpuErrchk(hipMemset(*wfc_d, 0, nwfbytes));
gpuErrchk(hipMalloc(wfp_d, nwfbytes));
gpuErrchk(hipMemset(*wfp_d, 0, nwfbytes));
}
extern "C"
void step(int nb, int nz, int nx, int nt, int ns,
float *model_d, float *wfc_d, float *wfp_d,
float *source_amplitude_h,
int *sources_z_h, int *sources_x_h, float *wfc_h)
{
int tns = nb * ns;
float *source_amplitude_d;
size_t nbytes = tns * nt * sizeof(float);
gpuErrchk(hipMalloc(&source_amplitude_d, nbytes));
gpuErrchk(hipMemcpy(source_amplitude_d, source_amplitude_h, nbytes,
hipMemcpyHostToDevice));
int *sources_z_d;
nbytes = tns * sizeof(int);
gpuErrchk(hipMalloc(&sources_z_d, nbytes));
gpuErrchk(hipMemcpy(sources_z_d, sources_z_h, nbytes,
hipMemcpyHostToDevice));
int *sources_x_d;
nbytes = tns * sizeof(int);
gpuErrchk(hipMalloc(&sources_x_d, nbytes));
gpuErrchk(hipMemcpy(sources_x_d, sources_x_h, nbytes,
hipMemcpyHostToDevice));
dim3 dimBlock(32, 32, 1);
int gridx = (nx + dimBlock.x - 1) / dimBlock.x;
int gridz = (nz + dimBlock.y - 1) / dimBlock.y;
int gridb = (nb + dimBlock.z - 1) / dimBlock.z;
dim3 dimGrid(gridx, gridz, gridb);
int it;
float *tmp;
for (it = 0; it < nt; it++)
{
hipLaunchKernelGGL(( step_d), dim3(dimGrid), dim3(dimBlock), 0, 0, model_d, wfc_d, wfp_d,
nb, nz, nx);
gpuErrchk( hipPeekAtLastError() );
hipLaunchKernelGGL(( add_sources_d), dim3(nb), dim3(ns), 0, 0, model_d, wfp_d,
source_amplitude_d, sources_z_d, sources_x_d,
nz, nx, nt, ns, it);
gpuErrchk( hipPeekAtLastError() );
tmp = wfc_d;
wfc_d = wfp_d;
wfp_d = tmp;
}
int nwf = nb * nz * nx;
size_t nwfbytes = nwf * sizeof(float);
gpuErrchk(hipMemcpy(wfc_h, wfc_d, nwfbytes, hipMemcpyDeviceToHost));
gpuErrchk(hipFree(source_amplitude_d));
gpuErrchk(hipFree(sources_z_d));
gpuErrchk(hipFree(sources_x_d));
}
extern "C"
void finalise(float *model_d, float *wfc_d, float *wfp_d)
{
gpuErrchk(hipFree(model_d));
gpuErrchk(hipFree(wfc_d));
gpuErrchk(hipFree(wfp_d));
}
|
2138a7d050b29c453d6d4f1d484691291953c844.cu
|
/* Two kernels, no shared memory, manual laplacian, 1D malloc */
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__constant__ float fd_d[3];
// Device code
__global__ void step_d(const float *const model,
float *wfc,
float *wfp,
const int nb, const int nz, const int nx)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int z = blockDim.y * blockIdx.y + threadIdx.y;
int b = blockDim.z * blockIdx.z + threadIdx.z;
int i = z * nx + x;
int ib = b * nz * nx + i;
float lap;
bool in_domain = (x > 1) && (x < nx - 2)
&& (z > 1) && (z < nz - 2)
&& (b < nb);
if (in_domain)
{
/* Laplacian */
lap = (fd_d[0] * wfc[ib] +
fd_d[1] *
(wfc[ib + 1] +
wfc[ib - 1] +
wfc[ib + nx] +
wfc[ib - nx]) +
fd_d[2] *
(wfc[ib + 2] +
wfc[ib - 2] +
wfc[ib + 2 * nx] +
wfc[ib - 2 * nx]));
/* Main evolution equation */
wfp[ib] = model[i] * lap + 2 * wfc[ib] - wfp[ib];
}
}
__global__ void add_sources_d(const float *const model,
float *wfp,
const float *const source_amplitude,
const int *const sources_z,
const int *const sources_x,
const int nz, const int nx,
const int nt, const int ns, const int it)
{
int x = threadIdx.x;
int b = blockIdx.x;
int i = sources_z[b * ns + x] * nx + sources_x[b * ns + x];
int ib = b * nz * nx + i;
wfp[ib] += source_amplitude[b * ns * nt + x * nt + it] * model[i];
}
// Host code
extern "C"
void setup(int nb, int nz, int nx, float dx, float *model_h,
float **model_d, float **wfc_d, float **wfp_d)
{
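/* fd holds the 4th-order central-difference weights for the 2D Laplacian,
already scaled by 1/dx^2: the centre term -10/2 is 2 * (-5/2) (one -5/2 per
axis), while 4/3 and -1/12 weight the +-1 and +-2 neighbours on each axis. */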
float fd[3] = {
-10.0f / 2 / (dx * dx),
4.0f / 3 / (dx * dx),
-1.0f / 12 / (dx * dx)
};
gpuErrchk(cudaMemcpyToSymbol(fd_d, fd, 3*sizeof(float)));
int nmodel = nz * nx;
int nwf = nb * nmodel;
size_t nmodelbytes = nmodel * sizeof(float);
size_t nwfbytes = nwf * sizeof(float);
gpuErrchk(cudaMalloc(model_d, nmodelbytes));
gpuErrchk(cudaMemcpy(*model_d, model_h, nmodelbytes,
cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc(wfc_d, nwfbytes));
gpuErrchk(cudaMemset(*wfc_d, 0, nwfbytes));
gpuErrchk(cudaMalloc(wfp_d, nwfbytes));
gpuErrchk(cudaMemset(*wfp_d, 0, nwfbytes));
}
extern "C"
void step(int nb, int nz, int nx, int nt, int ns,
float *model_d, float *wfc_d, float *wfp_d,
float *source_amplitude_h,
int *sources_z_h, int *sources_x_h, float *wfc_h)
{
int tns = nb * ns;
float *source_amplitude_d;
size_t nbytes = tns * nt * sizeof(float);
gpuErrchk(cudaMalloc(&source_amplitude_d, nbytes));
gpuErrchk(cudaMemcpy(source_amplitude_d, source_amplitude_h, nbytes,
cudaMemcpyHostToDevice));
int *sources_z_d;
nbytes = tns * sizeof(int);
gpuErrchk(cudaMalloc(&sources_z_d, nbytes));
gpuErrchk(cudaMemcpy(sources_z_d, sources_z_h, nbytes,
cudaMemcpyHostToDevice));
int *sources_x_d;
nbytes = tns * sizeof(int);
gpuErrchk(cudaMalloc(&sources_x_d, nbytes));
gpuErrchk(cudaMemcpy(sources_x_d, sources_x_h, nbytes,
cudaMemcpyHostToDevice));
dim3 dimBlock(32, 32, 1);
int gridx = (nx + dimBlock.x - 1) / dimBlock.x;
int gridz = (nz + dimBlock.y - 1) / dimBlock.y;
int gridb = (nb + dimBlock.z - 1) / dimBlock.z;
dim3 dimGrid(gridx, gridz, gridb);
int it;
float *tmp;
for (it = 0; it < nt; it++)
{
step_d<<<dimGrid, dimBlock>>>(model_d, wfc_d, wfp_d,
nb, nz, nx);
gpuErrchk( cudaPeekAtLastError() );
add_sources_d<<<nb, ns>>>(model_d, wfp_d,
source_amplitude_d, sources_z_d, sources_x_d,
nz, nx, nt, ns, it);
gpuErrchk( cudaPeekAtLastError() );
tmp = wfc_d;
wfc_d = wfp_d;
wfp_d = tmp;
}
int nwf = nb * nz * nx;
size_t nwfbytes = nwf * sizeof(float);
gpuErrchk(cudaMemcpy(wfc_h, wfc_d, nwfbytes, cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(source_amplitude_d));
gpuErrchk(cudaFree(sources_z_d));
gpuErrchk(cudaFree(sources_x_d));
}
extern "C"
void finalise(float *model_d, float *wfc_d, float *wfp_d)
{
gpuErrchk(cudaFree(model_d));
gpuErrchk(cudaFree(wfc_d));
gpuErrchk(cudaFree(wfp_d));
}
|
3e72012a4da17487a314b36e3b48cca4693d0cf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "domain_transform_solver.h"
#include <iostream>
#include <limits>
#include "cudaArray2D.h"
#include "color_space.cuh"
#include "cub_hip.cuh"
#include "cumsum_kernels2d.cuh"
#include "domain_transform_common_functions.h"
#include "domain_transform_common_kernels.cuh"
#include "domain_transform_filter.cuh"
#include "domain_transform_filter_struct.cuh"
#include "domain_transform_solver.cuh"
#include "domain_transform_solver_struct.cuh"
#include "error_types.h"
namespace domain_transform {
template <typename CudaArrayType1, typename CudaArrayType2,
typename CudaArrayType3>
__global__ void ComputeConfidenceKernel(const ImageDim image_dim,
const float variance_scale,
const int left_clear_width,
CudaArrayType1 dtz2, CudaArrayType2 dtz,
CudaArrayType3 confidence) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= 0 && y >= 0 && x < image_dim.width && y < image_dim.height) {
if (x > left_clear_width) {
const float dtz_val = dtz.get(x, y);
const float variance = dtz2.get(x, y) - dtz_val * dtz_val;
const float conf = variance < 0 || !isfinite(variance)
? 0
: expf(-variance * variance_scale);
confidence.set(x, y, conf);
} else {
confidence.set(x, y, 0);
}
}
}
void DomainTransformSolver::InitFrame(const COLOR_SPACE &color_space,
void *color_image, float *target,
float *confidence) {
DomainTransformFilter::InitFrame(color_space, color_image);
solver_struct_->target.View(0, 0, image_dims_.width, image_dims_.height) =
target;
GPU_CHECK(hipPeekAtLastError());
if (confidence == nullptr) {
solver_struct_->confidence.Fill(1.0f);
} else {
solver_struct_->confidence.View(0, 0, image_dims_.width,
image_dims_.height) = confidence;
GPU_CHECK(hipPeekAtLastError());
}
}
void DomainTransformSolver::Download(const ImageType &image_type,
void *image) const {
switch (image_type) {
case ImageType::TARGET: {
solver_struct_->target.View(0, 0, image_dims_.width, image_dims_.height)
.CopyTo(reinterpret_cast<float *>(image));
GPU_CHECK(hipPeekAtLastError());
break;
}
case ImageType::CONFIDENCE: {
solver_struct_->confidence
.View(0, 0, image_dims_.width, image_dims_.height)
.CopyTo(reinterpret_cast<float *>(image));
GPU_CHECK(hipPeekAtLastError());
break;
}
case ImageType::OPTIMIZED_QUANTITY: {
filter_struct_->var.View(0, 0, image_dims_.width, image_dims_.height)
.CopyTo(reinterpret_cast<float *>(image));
GPU_CHECK(hipPeekAtLastError());
break;
}
default: { DomainTransformFilter::Download(image_type, image); }
}
}
void DomainTransformSolver::ComputeColorSpaceDifferential(
const DomainFilterParams &domain_filter_params) {
filter_params_ = domain_filter_params;
DomainTransformFilter::ComputeColorSpaceDifferential(filter_params_);
}
DomainTransformSolver::DomainTransformSolver(const ImageDim &max_image_dims)
: DomainTransformFilter(max_image_dims),
solver_struct_(new DomainTransformSolverStruct(max_image_dims.width,
max_image_dims.height)) {}
void DomainTransformSolver::Optimize(
const DomainOptimizeParams &optimize_params,
const float overwrite_target_above_conf) {
dim3 block_dim, grid_dim;
ComputeBlockAndGridDim2D<false>(image_dims_, &block_dim, &grid_dim);
cua::CudaArray2D<float> filtered_var = filter_struct_->var_buffer;
cua::CudaArray2D<float> var = filter_struct_->var;
hipLaunchKernelGGL(( CopyVariable), dim3(grid_dim), dim3(block_dim), 0, 0, image_dims_, solver_struct_->target,
var);
GPU_CHECK(hipPeekAtLastError());
filtered_var.Fill(0.0f);
constexpr int kDMIter = 1;
for (int i = 0; i < optimize_params.num_iterations; i++) {
for (int j = 0; j < kDMIter; j++) {
float sigma = optimize_params.sigma_z;
if (j != 0) {
const float multiplier = 3 * ::pow(2, (kDMIter - (j + 1))) /
std::sqrt(::pow(4, kDMIter) - 1);
sigma *= multiplier;
}
if (overwrite_target_above_conf > 0 && overwrite_target_above_conf < 1) {
hipLaunchKernelGGL(( CopyVariableFlagGreaterThanThresh), dim3(grid_dim), dim3(block_dim), 0, 0,
image_dims_, overwrite_target_above_conf,
solver_struct_->confidence, solver_struct_->target, var);
}
IntegrateVariable(image_dims_, var, &filter_struct_->summed_area_x);
switch (optimize_params.loss) {
case RobustLoss::CHARBONNIER: {
hipLaunchKernelGGL(( OptimizeX<RobustLoss::CHARBONNIER>), dim3(grid_dim), dim3(block_dim), 0, 0,
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_H, sigma,
optimize_params.step_size, filter_struct_->summed_area_x,
optimize_params.lambda, filtered_var);
GPU_CHECK(hipPeekAtLastError());
break;
}
case RobustLoss::L2: {
hipLaunchKernelGGL(( OptimizeX<RobustLoss::L2>), dim3(grid_dim), dim3(block_dim), 0, 0,
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_H, sigma,
optimize_params.step_size, filter_struct_->summed_area_x,
optimize_params.lambda, filtered_var);
GPU_CHECK(hipPeekAtLastError());
break;
}
}
// -----------------------------------------------------------
// Swap buffers.
cua::CudaArray2D<float> tmp = filtered_var;
filtered_var = var;
var = tmp;
if (overwrite_target_above_conf > 0 && overwrite_target_above_conf < 1) {
hipLaunchKernelGGL(( CopyVariableFlagGreaterThanThresh), dim3(grid_dim), dim3(block_dim), 0, 0,
image_dims_, overwrite_target_above_conf,
solver_struct_->confidence, solver_struct_->target, var);
}
IntegrateVariable(image_dims_, var, &filter_struct_->summed_area_y,
&filter_struct_->parallel_scan_transpose);
dim3 block_dim_t, grid_dim_t;
ComputeBlockAndGridDim2D<true>(image_dims_, &block_dim_t, &grid_dim_t);
switch (optimize_params.loss) {
case RobustLoss::CHARBONNIER: {
hipLaunchKernelGGL(( OptimizeY<RobustLoss::CHARBONNIER>), dim3(grid_dim_t), dim3(block_dim_t), 0, 0,
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_V, sigma,
optimize_params.step_size, filter_struct_->summed_area_y,
optimize_params.lambda, filtered_var);
GPU_CHECK(hipPeekAtLastError());
break;
}
case RobustLoss::L2: {
hipLaunchKernelGGL(( OptimizeY<RobustLoss::L2>), dim3(grid_dim_t), dim3(block_dim_t), 0, 0,
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_V, sigma,
optimize_params.step_size, filter_struct_->summed_area_y,
optimize_params.lambda, filtered_var);
GPU_CHECK(hipPeekAtLastError());
break;
}
}
// -----------------------------------------------------------
// Swap buffers.
tmp = filtered_var;
filtered_var = var;
var = tmp;
}
}
hipLaunchKernelGGL(( CopyVariable), dim3(grid_dim), dim3(block_dim), 0, 0, image_dims_, var, filter_struct_->var);
GPU_CHECK(hipPeekAtLastError());
}
DomainTransformSolver::~DomainTransformSolver() {}
void DomainTransformSolver::ComputeConfidence(
const DomainFilterParamsVec<1> &conf_params,
const int left_side_clear_width) {
// Compute the confidence in an edge-aware way using the domain transform as a
// means of local variance estimator. See
// \url{https://drive.google.com/file/d/0B4nuwEMaEsnmdEREcjhlSXM2NGs/view}
// Barron and Poole ECCV16 supplement page 6 for more details.
// Assumes that the differentials have already been taken.
// Compute DT(Z) at confidence.
// Compute DT(Z^2) at confidence_square.
// Compute the variance = DT(Z^2) - DT(Z)^2 and turn it into the confidence.
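// Concretely, with sigma = conf_params.sigmas[0], ComputeConfidenceKernel
// evaluates variance = DT(Z^2) - DT(Z)^2 per pixel and sets
// confidence = exp(-variance / (2 * sigma^2)), forcing it to 0 where the
// variance is negative or non-finite, or where x <= left_side_clear_width.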
dim3 block_dim, grid_dim;
ComputeBlockAndGridDim2D<false>(image_dims_, &block_dim, &grid_dim);
DomainFilterParams local_filter_params;
local_filter_params.sigma_x = conf_params.sigma_x;
local_filter_params.sigma_y = conf_params.sigma_y;
local_filter_params.sigma_r = conf_params.sigma_r;
const int kNimDTFilterIteration = 3;
Filter(local_filter_params, kNimDTFilterIteration, &solver_struct_->target,
&solver_struct_->confidence);
hipLaunchKernelGGL(( SquareVariable), dim3(grid_dim), dim3(block_dim), 0, 0,
image_dims_, solver_struct_->target,
solver_struct_->confidence_square_buffer);
GPU_CHECK(hipPeekAtLastError());
Filter(local_filter_params, kNimDTFilterIteration,
&solver_struct_->confidence_square_buffer,
&solver_struct_->confidence_square);
const float variance_scale =
0.5f / (conf_params.sigmas[0] * conf_params.sigmas[0]);
hipLaunchKernelGGL(( ComputeConfidenceKernel), dim3(grid_dim), dim3(block_dim), 0, 0,
image_dims_, variance_scale, left_side_clear_width,
solver_struct_->confidence_square, solver_struct_->confidence,
solver_struct_->confidence);
GPU_CHECK(hipPeekAtLastError());
}
void DomainTransformSolver::ClearAll() {
DomainTransformFilter::ClearAll();
solver_struct_->confidence.Fill(0.0f);
solver_struct_->confidence_square.Fill(0.0f);
solver_struct_->confidence_square_buffer.Fill(0.0f);
solver_struct_->target.Fill(0.0f);
}
void DomainTransformSolver::IntegrateColorDifferentials() {
DomainTransformFilter::IntegrateColorDifferentials();
}
} // namespace domain_transform
|
3e72012a4da17487a314b36e3b48cca4693d0cf4.cu
|
#include "domain_transform_solver.h"
#include <iostream>
#include <limits>
#include "cudaArray2D.h"
#include "color_space.cuh"
#include "cub.cuh"
#include "cumsum_kernels2d.cuh"
#include "domain_transform_common_functions.h"
#include "domain_transform_common_kernels.cuh"
#include "domain_transform_filter.cuh"
#include "domain_transform_filter_struct.cuh"
#include "domain_transform_solver.cuh"
#include "domain_transform_solver_struct.cuh"
#include "error_types.h"
namespace domain_transform {
template <typename CudaArrayType1, typename CudaArrayType2,
typename CudaArrayType3>
__global__ void ComputeConfidenceKernel(const ImageDim image_dim,
const float variance_scale,
const int left_clear_width,
CudaArrayType1 dtz2, CudaArrayType2 dtz,
CudaArrayType3 confidence) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= 0 && y >= 0 && x < image_dim.width && y < image_dim.height) {
if (x > left_clear_width) {
const float dtz_val = dtz.get(x, y);
const float variance = dtz2.get(x, y) - dtz_val * dtz_val;
const float conf = variance < 0 || !isfinite(variance)
? 0
: expf(-variance * variance_scale);
confidence.set(x, y, conf);
} else {
confidence.set(x, y, 0);
}
}
}
void DomainTransformSolver::InitFrame(const COLOR_SPACE &color_space,
void *color_image, float *target,
float *confidence) {
DomainTransformFilter::InitFrame(color_space, color_image);
solver_struct_->target.View(0, 0, image_dims_.width, image_dims_.height) =
target;
GPU_CHECK(cudaPeekAtLastError());
if (confidence == nullptr) {
solver_struct_->confidence.Fill(1.0f);
} else {
solver_struct_->confidence.View(0, 0, image_dims_.width,
image_dims_.height) = confidence;
GPU_CHECK(cudaPeekAtLastError());
}
}
void DomainTransformSolver::Download(const ImageType &image_type,
void *image) const {
switch (image_type) {
case ImageType::TARGET: {
solver_struct_->target.View(0, 0, image_dims_.width, image_dims_.height)
.CopyTo(reinterpret_cast<float *>(image));
GPU_CHECK(cudaPeekAtLastError());
break;
}
case ImageType::CONFIDENCE: {
solver_struct_->confidence
.View(0, 0, image_dims_.width, image_dims_.height)
.CopyTo(reinterpret_cast<float *>(image));
GPU_CHECK(cudaPeekAtLastError());
break;
}
case ImageType::OPTIMIZED_QUANTITY: {
filter_struct_->var.View(0, 0, image_dims_.width, image_dims_.height)
.CopyTo(reinterpret_cast<float *>(image));
GPU_CHECK(cudaPeekAtLastError());
break;
}
default: { DomainTransformFilter::Download(image_type, image); }
}
}
void DomainTransformSolver::ComputeColorSpaceDifferential(
const DomainFilterParams &domain_filter_params) {
filter_params_ = domain_filter_params;
DomainTransformFilter::ComputeColorSpaceDifferential(filter_params_);
}
DomainTransformSolver::DomainTransformSolver(const ImageDim &max_image_dims)
: DomainTransformFilter(max_image_dims),
solver_struct_(new DomainTransformSolverStruct(max_image_dims.width,
max_image_dims.height)) {}
void DomainTransformSolver::Optimize(
const DomainOptimizeParams &optimize_params,
const float overwrite_target_above_conf) {
dim3 block_dim, grid_dim;
ComputeBlockAndGridDim2D<false>(image_dims_, &block_dim, &grid_dim);
cua::CudaArray2D<float> filtered_var = filter_struct_->var_buffer;
cua::CudaArray2D<float> var = filter_struct_->var;
CopyVariable<<<grid_dim, block_dim>>>(image_dims_, solver_struct_->target,
var);
GPU_CHECK(cudaPeekAtLastError());
filtered_var.Fill(0.0f);
constexpr int kDMIter = 1;
for (int i = 0; i < optimize_params.num_iterations; i++) {
for (int j = 0; j < kDMIter; j++) {
float sigma = optimize_params.sigma_z;
if (j != 0) {
const float multiplier = 3 * std::pow(2, (kDMIter - (j + 1))) /
std::sqrt(std::pow(4, kDMIter) - 1);
sigma *= multiplier;
}
if (overwrite_target_above_conf > 0 && overwrite_target_above_conf < 1) {
CopyVariableFlagGreaterThanThresh<<<grid_dim, block_dim>>>(
image_dims_, overwrite_target_above_conf,
solver_struct_->confidence, solver_struct_->target, var);
}
IntegrateVariable(image_dims_, var, &filter_struct_->summed_area_x);
switch (optimize_params.loss) {
case RobustLoss::CHARBONNIER: {
OptimizeX<RobustLoss::CHARBONNIER><<<grid_dim, block_dim>>>(
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_H, sigma,
optimize_params.step_size, filter_struct_->summed_area_x,
optimize_params.lambda, filtered_var);
GPU_CHECK(cudaPeekAtLastError());
break;
}
case RobustLoss::L2: {
OptimizeX<RobustLoss::L2><<<grid_dim, block_dim>>>(
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_H, sigma,
optimize_params.step_size, filter_struct_->summed_area_x,
optimize_params.lambda, filtered_var);
GPU_CHECK(cudaPeekAtLastError());
break;
}
}
// -----------------------------------------------------------
// Swap buffers.
cua::CudaArray2D<float> tmp = filtered_var;
filtered_var = var;
var = tmp;
if (overwrite_target_above_conf > 0 && overwrite_target_above_conf < 1) {
CopyVariableFlagGreaterThanThresh<<<grid_dim, block_dim>>>(
image_dims_, overwrite_target_above_conf,
solver_struct_->confidence, solver_struct_->target, var);
}
IntegrateVariable(image_dims_, var, &filter_struct_->summed_area_y,
&filter_struct_->parallel_scan_transpose);
dim3 block_dim_t, grid_dim_t;
ComputeBlockAndGridDim2D<true>(image_dims_, &block_dim_t, &grid_dim_t);
switch (optimize_params.loss) {
case RobustLoss::CHARBONNIER: {
OptimizeY<RobustLoss::CHARBONNIER><<<grid_dim_t, block_dim_t>>>(
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_V, sigma,
optimize_params.step_size, filter_struct_->summed_area_y,
optimize_params.lambda, filtered_var);
GPU_CHECK(cudaPeekAtLastError());
break;
}
case RobustLoss::L2: {
OptimizeY<RobustLoss::L2><<<grid_dim_t, block_dim_t>>>(
image_dims_, var, solver_struct_->target,
solver_struct_->confidence, filter_struct_->ct_V, sigma,
optimize_params.step_size, filter_struct_->summed_area_y,
optimize_params.lambda, filtered_var);
GPU_CHECK(cudaPeekAtLastError());
break;
}
}
// -----------------------------------------------------------
// Swap buffers.
tmp = filtered_var;
filtered_var = var;
var = tmp;
}
}
CopyVariable<<<grid_dim, block_dim>>>(image_dims_, var, filter_struct_->var);
GPU_CHECK(cudaPeekAtLastError());
}
DomainTransformSolver::~DomainTransformSolver() {}
void DomainTransformSolver::ComputeConfidence(
const DomainFilterParamsVec<1> &conf_params,
const int left_side_clear_width) {
// Compute the confidence in an edge-aware way using the domain transform as a
// means of local variance estimator. See
// \url{https://drive.google.com/file/d/0B4nuwEMaEsnmdEREcjhlSXM2NGs/view}
// Barron and Poole ECCV16 supplement page 6 for more details.
// Assumes that the differentials have already been taken.
// Compute DT(Z) at confidence.
// Compute DT(Z^2) at confidence_square.
// Compute the variance = DT(Z^2) - DT(Z)^2 and turn it into the confidence.
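// Concretely, with sigma = conf_params.sigmas[0], ComputeConfidenceKernel
// evaluates variance = DT(Z^2) - DT(Z)^2 per pixel and sets
// confidence = exp(-variance / (2 * sigma^2)), forcing it to 0 where the
// variance is negative or non-finite, or where x <= left_side_clear_width.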
dim3 block_dim, grid_dim;
ComputeBlockAndGridDim2D<false>(image_dims_, &block_dim, &grid_dim);
DomainFilterParams local_filter_params;
local_filter_params.sigma_x = conf_params.sigma_x;
local_filter_params.sigma_y = conf_params.sigma_y;
local_filter_params.sigma_r = conf_params.sigma_r;
const int kNumDTFilterIterations = 3;
Filter(local_filter_params, kNumDTFilterIterations, &solver_struct_->target,
&solver_struct_->confidence);
SquareVariable<<<grid_dim, block_dim>>>(
image_dims_, solver_struct_->target,
solver_struct_->confidence_square_buffer);
GPU_CHECK(cudaPeekAtLastError());
Filter(local_filter_params, kNumDTFilterIterations,
&solver_struct_->confidence_square_buffer,
&solver_struct_->confidence_square);
const float variance_scale =
0.5f / (conf_params.sigmas[0] * conf_params.sigmas[0]);
ComputeConfidenceKernel<<<grid_dim, block_dim>>>(
image_dims_, variance_scale, left_side_clear_width,
solver_struct_->confidence_square, solver_struct_->confidence,
solver_struct_->confidence);
GPU_CHECK(cudaPeekAtLastError());
}
void DomainTransformSolver::ClearAll() {
DomainTransformFilter::ClearAll();
solver_struct_->confidence.Fill(0.0f);
solver_struct_->confidence_square.Fill(0.0f);
solver_struct_->confidence_square_buffer.Fill(0.0f);
solver_struct_->target.Fill(0.0f);
}
void DomainTransformSolver::IntegrateColorDifferentials() {
DomainTransformFilter::IntegrateColorDifferentials();
}
} // namespace domain_transform
|
fca0a211fdb9eef531b4c8462a5d5f4551827504.hip
|
// !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 4
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
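//Each outer iteration streams ITERATIONS values of A: thread tid reads
//A[tid*F + k*max_tid*F], i.e. threads are spaced F ints apart and advance by
//max_tid*F ints per inner step, so a warp's 32 loads cover a contiguous
//32*F-int (512-byte) span. Only the last value read is kept in m_sum; the
//kernel exists to generate memory traffic, not a meaningful result.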
int m_sum=0;
for (unsigned j=0; j<iterations; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(PowerKernal, dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand() % 100;  // (int)(rand() / RAND_MAX) was integer division and is almost always 0
}
|
fca0a211fdb9eef531b4c8462a5d5f4551827504.cu
|
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 4
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int m_sum=0;
for (unsigned j=0; j<iterations; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand() % 100;  // (int)(rand() / RAND_MAX) was integer division and is almost always 0
}
|
6a40e95912635330f6037faaa3556d490f3390eb.hip
|
// !!! This is a file automatically generated by hipify!!!
/// LSU EE 7722 GPU Microarchitecture
//
/// SOLUTION -- Homework 3 - Spring 2017
//
// Assignment: http://www.ece.lsu.edu/koppel/gp/2017/hw03.pdf
/// Documentation
//
// c++: http://en.cppreference.com
// CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <new>
#include <string>
#include <assert.h>
#include <time.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <functional>
#include <hip/hip_runtime.h>
#include <gp/cuda-gpuinfo.h>
#include <nperf.h>
using namespace std;
inline double
time_fp()
{
struct timespec tp;
clock_gettime(CLOCK_REALTIME,&tp);
return ((double)tp.tv_sec)+((double)tp.tv_nsec) * 0.000000001;
}
// Matrix dimension. (Matrix size will be N by N.)
//
const int N = 16;
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
const char* elt_type_str(float f){ return "float"; }
const char* elt_type_str(double f){ return "double"; }
const char* elt_type_str(int f){ return "int"; }
struct App
{
// Number of input and output matrices.
//
int n_mats;
// Host pointers to the input and output arrays, and to a CPU-computed
// output array used for checking results.
//
Elt_Type *h_a, *h_b, *h_out, *h_out_check;
//
// Note: h_a and h_b each point to an array holding n_mats N-by-N matrices,
// so each of those arrays has n_mats * N * N elements in total.
// GPU pointers to the input and output arrays.
//
Elt_Type *d_a, *d_b, *d_out;
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
typedef void (*KPtr)(Elt_Type *dout);
extern "C" __global__ void
mxm_volk(Elt_Type* __restrict__ dout)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
// Convenience function for finding index of the element at row r,
// column c, in matrix i.
//
auto idx = [](int i,int r,int c) { return i * N*N + r * N + c; };
const int start = tid / N;
const int stop = d_app.n_mats;
const int inc = num_threads / N;
// Chunk Size: Number of columns of matrix A to load into shared memory.
//
const int CS = 32 / sizeof(Elt_Type);
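// For Elt_Type = float this gives CS = 8, so each staged chunk of A is an
// N x 8 tile (32 bytes per row).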
// Column in matrix B assigned to this thread.
//
const int cb = threadIdx.x % N;
// Column offset to load when populating shared memory.
//
const int c0 = threadIdx.x % CS;
// First row to load when populating shared memory.
//
const int r0 = ( threadIdx.x % N ) / CS;
//
//
const int h0 = threadIdx.x / N;
// Number of times per matrix that shared memory will have to be loaded.
//
const int RS = N / CS;
const int MpB = 32 * 32 / N; // Matrices per block.
// Storage for buffering N by CS submatrix of matrix A.
//
__shared__ Elt_Type mat_a[N][MpB][CS];
for ( int h=start; h<stop; h += inc )
{
// Storage for column of output matrix.
//
Elt_Type elt[N];
for ( auto& e: elt ) e = 0;
for ( int cc=0; cc<N; cc += CS )
{
// Write shared memory with an N by CS submatrix of A.
//
for ( int rr = 0; rr<N; rr += RS )
mat_a[rr + r0][h0][c0] =
d_app.d_a[ idx( h, rr + r0, cc + c0 ) ];
if ( N > 32 ) __syncthreads();
for ( int rb=0; rb<CS; rb++ )
{
const int r = cc + rb; // Row in matrix B, column in mat A.
Elt_Type elt_rb_cb = d_app.d_b[ idx( h, r, cb ) ];
for ( int ra=0; ra<N; ra++ )
elt[ra] += mat_a[ra][h0][rb] * elt_rb_cb;
}
if ( N > 32 ) __syncthreads();
}
for ( int r=0; r<N; r++ )
dout[ idx( h, r, cb ) ] = elt[r];
}
}
template<int thd_p_col = 2 >
__global__ void
mxm_tpc(Elt_Type* __restrict__ dout)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
// Convenience function for finding index of the element at row r,
// column c, in matrix i.
//
auto idx = [](int i,int r,int c) { return i * N*N + r * N + c; };
/// SOLUTION
const int thd_p_mat = N * thd_p_col;
const int start = tid / thd_p_mat;
const int stop = d_app.n_mats;
const int inc = num_threads / thd_p_mat;
// Chunk Size: Number of columns of matrix A to load into shared memory.
//
const int CS = 32 / sizeof(Elt_Type);
// Column in matrix B assigned to this thread.
//
const int cb = ( threadIdx.x / thd_p_col ) % N;
// Column offset to load when populating shared memory.
//
const int c0 = threadIdx.x % CS;
// First row to load when populating shared memory.
//
const int r0 = ( threadIdx.x % thd_p_mat ) / CS;
//
const int h0 = threadIdx.x / thd_p_mat;
// Number of times per matrix that shared memory will have to be loaded.
//
const int RS = thd_p_mat / CS;
const int MpB = 32 * 32 / thd_p_mat; // Matrices per block.
const int ra0 = threadIdx.x % thd_p_col;
// Storage for buffering N by CS submatrix of matrix A.
//
__shared__ Elt_Type mat_a[MpB][N][CS];
for ( int h=start; h<stop; h += inc )
{
// Storage for N/thd_p_col rows of a column of output matrix.
//
Elt_Type elt[N/thd_p_col];
for ( auto& e: elt ) e = 0;
for ( int cc=0; cc<N; cc += CS )
{
// Write shared memory with an N by CS submatrix of A.
//
if ( RS <= N || r0 < N )
for ( int rr = 0; rr<N; rr += RS )
mat_a[h0][rr + r0][c0] = d_app.d_a[ idx( h, rr + r0, cc + c0 ) ];
if ( thd_p_mat > 32 ) __syncthreads();
for ( int rb=0; rb<CS; rb++ )
{
const int r = cc + rb; // Row in matrix B, column in mat A.
Elt_Type elt_rb_cb = d_app.d_b[ idx( h, r, cb ) ];
for ( int ra=0; ra<N; ra += thd_p_col )
elt[ra/thd_p_col] += mat_a[h0][ra0+ra][rb] * elt_rb_cb;
}
if ( thd_p_mat > 32 ) __syncthreads();
}
for ( int r=0; r<N; r += thd_p_col )
dout[ idx( h, ra0+r, cb ) ] = elt[r/thd_p_col];
}
}
template<int tpc> bool mxm_tpc_block_size_okay(int bsize)
{
// Return true if mxm_tpc can run for a block size of bsize threads.
//
const int thd_p_mat = N * tpc;
return thd_p_mat <= bsize && bsize % thd_p_mat == 0;
};
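// Worked example for mxm_tpc (a sketch of the numbers, assuming
// Elt_Type = float so CS = 8, N = 16, and the 1024-thread sizing the shared
// tile is declared for):
//
//   thd_p_col = 2  ->  thd_p_mat = 32 threads cooperate on one matrix pair.
//   The thd_p_col threads sharing output column cb split its rows: each
//   computes rows ra0, ra0 + thd_p_col, ..., i.e. N / thd_p_col = 8 elements
//   accumulated in the elt[] registers.
//   MpB = 1024 / 32 = 32 matrices are staged per block, RS = 32 / 8 = 4, so
//   each thread writes N / RS = 4 elements of the 16 x 8 A tile to shared
//   memory, and the block size must be a multiple of thd_p_mat = 32
//   (enforced by mxm_tpc_block_size_okay).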
GPU_Info
print_gpu_and_kernel_info()
{
GPU_Info info;
gpu_info_print();
// Determine which GPU to use. (For starters, if there's more than
// one, choose the one connected to the display.)
//
int dev = gpu_choose_index();
CE(hipSetDevice(dev));
printf("Using GPU %d\n",dev);
info.get_gpu_info(dev);
/// Add kernels to the list of kernels to run.
//
info.GET_INFO(mxm_volk);
info.GET_INFO(mxm_tpc<1>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<1>;
info.GET_INFO(mxm_tpc<2>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<2>;
info.GET_INFO(mxm_tpc<4>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<4>;
info.GET_INFO(mxm_tpc<8>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<8>;
// Print information about kernel.
//
printf("\nCUDA Kernel Resource Usage:\n");
for ( int i=0; i<info.num_kernels; i++ )
{
printf("For %s:\n", info.ki[i].name);
printf(" %6zd shared, %zd const, %zd loc, %d regs; "
"%d max threads per block.\n",
info.ki[i].cfa.sharedSizeBytes,
info.ki[i].cfa.constSizeBytes,
info.ki[i].cfa.localSizeBytes,
info.ki[i].cfa.numRegs,
info.ki[i].cfa.maxThreadsPerBlock);
}
return info;
}
int
main(int argc, char **argv)
{
const bool debug = false;
// Initialize the collection of metrics if GPU debugging is
// off. Metrics collected using the NPerf library which itself uses
// the NVIDIA CUPTI API to collect data from GPU performance (event)
// counters and to compute performance metrics in terms of the event
// data.
//
# ifdef __P_CUDA_DEBUG__
NPerf_init(false); //
# else
NPerf_init(true);
# endif
// Get info about GPU and each kernel.
//
GPU_Info info = print_gpu_and_kernel_info();
/// Indicate which metrics to collect.
//
// See the CUPTI User's Guide for a list of metrics, Section 1.6
// for the February 2016 guide.
//
NPerf_metric_collect("inst_executed");
NPerf_metric_collect("eligible_warps_per_cycle");
NPerf_metric_collect("gld_efficiency");
NPerf_metric_collect("gst_efficiency");
NPerf_metric_collect("gld_throughput");
NPerf_metric_collect("gst_throughput");
NPerf_metric_collect("shared_load_transactions_per_request");
NPerf_metric_collect("shared_store_transactions_per_request");
//
// Note: The more metrics that are collected, the more times a
// kernel will need to be run and the longer you'll have to wait for
// an answer.
const int num_mp = info.cuda_prop.multiProcessorCount;
const int nsq = N * N;
// Examine argument 1, block count, default is number of MPs.
// Negative value is a multiple of the number of MPs.
//
const int arg1_int = argc < 2 ? num_mp : atoi(argv[1]);
const int num_blocks =
arg1_int == 0 ? num_mp :
arg1_int < 0 ? -arg1_int * num_mp : arg1_int;
// Examine argument 2, number of threads per block.
// Zero means vary the number of threads in multiples of 32.
//
const int thd_per_block_arg = argc < 3 ? 0 : atoi(argv[2]);
const int thd_per_block_goal =
thd_per_block_arg == 0 ? 1024 : thd_per_block_arg;
const int num_threads = num_blocks * thd_per_block_goal;
const bool vary_warps = thd_per_block_arg == 0;
// Examine argument 3, size of array in MiB. Fractional values okay.
//
const int in_size_bytes_targ =
argc < 4 ? 1 << 24 : int( atof(argv[3]) * (1<<20) );
app.n_mats = in_size_bytes_targ / ( 2 * nsq * sizeof(app.h_a[0]) );
if ( num_threads <= 0 || app.n_mats <= 0 )
{
printf("Usage: %s [ NUM_CUDA_BLOCKS ] [THD_PER_BLOCK] "
"[DATA_SIZE_MiB]\n",
argv[0]);
exit(1);
}
const size_t a_size_elts = size_t(app.n_mats) * nsq;
const size_t a_size_bytes = a_size_elts * sizeof( app.h_a[0] );
const size_t out_size_elts = size_t(app.n_mats) * nsq;
const size_t out_size_bytes = out_size_elts * sizeof( app.h_out[0] );
// Amount of extra storage to allocate so that kernels can safely
// access data beyond the end of the input, avoiding the need for
// if statements.
//
const int overrun_size_elts = thd_per_block_goal * nsq;
const int overrun_size_bytes = overrun_size_elts * sizeof( app.h_out[0] );
// Allocate storage for CPU copy of data.
//
app.h_a = new Elt_Type[ a_size_elts ];
app.h_b = new Elt_Type[ a_size_elts ];
app.h_out = new Elt_Type[ out_size_elts ];
app.h_out_check = new Elt_Type[ out_size_elts ];
// Allocate storage for GPU copy of data.
//
CE( hipMalloc( &app.d_a, a_size_bytes + overrun_size_bytes ) );
CE( hipMalloc( &app.d_b, a_size_bytes + overrun_size_bytes ) );
CE( hipMalloc( &app.d_out, out_size_bytes + overrun_size_bytes ) );
const size_t in_size_bytes = 2 * a_size_bytes;
printf
("Input is %d pairs of %d x %d matrices of %s,\n",
app.n_mats, N, N, elt_type_str(Elt_Type(1)));
printf
(" total size %zd bytes (%.1f MiB).\n",
in_size_bytes, double(in_size_bytes)/(size_t(1)<<20));
// Define a convenience function that computes the index for the
// element at row r, column c in matrix i.
//
auto idx = [&](int i,int r,int c) { return i * nsq + r * N + c; };
// Initialize input array.
//
for ( int i=0; i<app.n_mats; i++ )
for ( int r=0; r<N; r++ )
for ( int c=0; c<N; c++ )
{
app.h_a[ idx(i,r,c) ] = debug ? Elt_Type(c) : drand48();
app.h_b[ idx(i,r,c) ] = debug ? Elt_Type(r) : drand48();
}
// Compute correct answer.
//
for ( int i=0; i<app.n_mats; i++ )
for ( int r=0; r<N; r++ )
for ( int c=0; c<N; c++ )
{
app.h_out_check[idx(i,r,c)] = 0;
for ( int k=0; k<N; k++ )
app.h_out_check[idx(i,r,c)] +=
app.h_a[idx(i,r,k)] * app.h_b[idx(i,k,c)];
}
// Compute the total number of MADD operations.
//
const int64_t num_madds = int64_t(N) * nsq * app.n_mats;
// Amount of data in and out of GPU chip.
//
const int64_t amt_data_bytes = in_size_bytes + out_size_bytes;
// Prepare events used for timing.
//
hipEvent_t gpu_start_ce, gpu_stop_ce;
CE(hipEventCreate(&gpu_start_ce));
CE(hipEventCreate(&gpu_stop_ce));
// Copy input array from CPU to GPU.
//
CE( hipMemcpy
( app.d_a, app.h_a, a_size_bytes, hipMemcpyHostToDevice ) );
CE( hipMemcpy
( app.d_b, app.h_b, a_size_bytes, hipMemcpyHostToDevice ) );
// Copy App structure to GPU.
//
CE( hipMemcpyToSymbol
( d_app, &app, sizeof(app), 0, hipMemcpyHostToDevice ) );
// Launch kernel multiple times and keep track of the best time.
printf("Launching %d blocks of %d threads with %.2f matrices per thread.\n",
num_blocks, thd_per_block_goal,
double(app.n_mats) * N / ( num_blocks * thd_per_block_goal ));
#ifdef __P_CUDA_DEBUG__
printf("*** THIS VERSION compiled with CUDA debugging on. Will be slow.\n");
#endif
auto check = [&]()
{
int err_count = 0;
for ( int i=0; i<app.n_mats; i++ )
for ( int r=0; r<N; r++ )
for ( int c=0; c<N; c++ )
{
const int ei = idx(i,r,c);
if ( fabs( app.h_out_check[ei] - app.h_out[ei] ) > 1e-5 )
{
err_count++;
if ( err_count < 5 )
printf
("Error at mat %d elt %d,%d: "
"%.7f != %.7f (correct)\n",
i, r, c, app.h_out[ei], app.h_out_check[ei] );
}
}
if ( err_count )
printf("Total errors %d\n", err_count);
};
for ( int kernel = 0; kernel < info.num_kernels; kernel++ )
{
Kernel_Info& k = info.ki[kernel];
bool heading_printed = false;
hipFuncAttributes& cfa = k.cfa;
const int wp_limit = cfa.maxThreadsPerBlock >> 5;
const int thd_limit = wp_limit << 5;
const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit);
const int wp_start = 4;
const int wp_stop = vary_warps ? 32 : wp_start;
const int wp_inc = 4;
for ( int wp_cnt = wp_start; wp_cnt <= wp_stop; wp_cnt += wp_inc )
{
const int thd_per_block =
vary_warps ? wp_cnt << 5 : thd_per_block_no_vary;
if ( ! k.block_size_okay(thd_per_block) ) continue;
// Zero the output array.
//
CE(hipMemset(app.d_out,0,out_size_bytes));
// Measure execution time starting "now", which is after the data
// has been sent to the GPU. This is only used when NPerf is not active.
//
CE(hipEventRecord(gpu_start_ce,0));
// Launch Kernel
//
for ( NPerf_data_reset(); NPerf_need_run_get(); )
hipLaunchKernelGGL(KPtr(info.ki[kernel].func_ptr), dim3(num_blocks),
dim3(thd_per_block), 0, 0, app.d_out);
// Stop measuring execution time now, which is before data is
// returned from the GPU.
//
CE(hipEventRecord(gpu_stop_ce,0));
CE(hipEventSynchronize(gpu_stop_ce));
float cuda_time_ms = -1.1;
CE(hipEventElapsedTime
(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
const double this_elapsed_time_s =
NPerf_metrics_collection_get()
? NPerf_kernel_et_get() : cuda_time_ms * 0.001;
const double thpt_compute_gflops =
num_madds / this_elapsed_time_s * 1e-9;
const double thpt_data_gbps =
amt_data_bytes / this_elapsed_time_s * 1e-9;
if ( vary_warps )
{
const int rate =
sizeof(Elt_Type) == sizeof(float)
? info.chip_sp_flops : info.chip_dp_flops;
const double comp_frac __attribute__((unused)) =
1e9 * thpt_compute_gflops / rate;
const double comm_frac =
1e9 * thpt_data_gbps / info.chip_bw_Bps;
const int max_st_len = 52;
// Number of warps, rounded up.
//
const int num_wps = ( thd_per_block + 31 ) >> 5;
// The maximum number of active blocks per MP for this
// kernel when launched with a block size of thd_per_block.
//
const int max_bl_per_mp =
info.get_max_active_blocks_per_mp(kernel,thd_per_block);
// Compute number of blocks available per MP based only on
// the number of blocks. This may be larger than the
// number of blocks that can run.
//
const int bl_per_mp_available =
0.999 + double(num_blocks) / num_mp;
// The number of active blocks is the minimum of what
// can fit and how many are available.
//
const int bl_per_mp =
min( bl_per_mp_available, max_bl_per_mp );
// Based on the number of blocks, compute the number of warps.
//
const int act_wps = num_wps * bl_per_mp;
if ( !heading_printed )
printf("Kernel %s:\n", info.ki[kernel].name);
heading_printed = true;
printf("%2d %2d wp %6.0f s %4.0f GF %4.0f GB/s %s\n",
num_wps, act_wps,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
string(int(comm_frac*max_st_len),'=').c_str());
printf("%2d %2d wp %6.0f s %4.0f GF %4.0f GB/s "
"%5.2f I/F %4.1f wp/c %3.0f%% %4.1f %4.1f\n",
num_wps, act_wps,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
NPerf_metric_value_get("inst_executed") * 32 / num_madds,
NPerf_metric_value_get("eligible_warps_per_cycle"),
NPerf_metric_value_get("gld_efficiency"),
NPerf_metric_value_get("shared_store_transactions_per_request"),
NPerf_metric_value_get("shared_load_transactions_per_request"));
} else {
printf
("%-10s %2d wp %7.0f s %4.0f GF %4.0f (%4.0f) GB/s "
"%5.2f I/F %5.1f%% %4.1f %4.1f\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
( NPerf_metric_value_get("gld_throughput") +
NPerf_metric_value_get("gst_throughput") ) * 1e-9,
NPerf_metric_value_get("inst_executed") * 32 / num_madds,
// NPerf_metric_value_get("gld_efficiency"),
// NPerf_metric_value_get("gst_efficiency"),
NPerf_metric_value_get("eligible_warps_per_cycle"),
NPerf_metric_value_get("shared_load_transactions_per_request"),
NPerf_metric_value_get("shared_store_transactions_per_request")
);
if (0 )
printf("K %-15s %2d wp %11.3f s %8.3f GFLOPS %8.3f GB/s\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps);
}
// Copy output array from GPU to CPU.
//
CE( hipMemcpy
( app.h_out, app.d_out, out_size_bytes,
hipMemcpyDeviceToHost) );
check();
}
}
}
|
6a40e95912635330f6037faaa3556d490f3390eb.cu
|
/// LSU EE 7722 GPU Microarchitecture
//
/// SOLUTION -- Homework 3 - Spring 2017
//
// Assignment: http://www.ece.lsu.edu/koppel/gp/2017/hw03.pdf
/// Documentation
//
// c++: http://en.cppreference.com
// CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <new>
#include <string>
#include <assert.h>
#include <time.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <functional>
#include <cuda_runtime.h>
#include <gp/cuda-gpuinfo.h>
#include <nperf.h>
using namespace std;
inline double
time_fp()
{
struct timespec tp;
clock_gettime(CLOCK_REALTIME,&tp);
return ((double)tp.tv_sec)+((double)tp.tv_nsec) * 0.000000001;
}
// Matrix dimension. (Matrix size will be N by N.)
//
const int N = 16;
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
const char* elt_type_str(float f){ return "float"; }
const char* elt_type_str(double f){ return "double"; }
const char* elt_type_str(int f){ return "int"; }
struct App
{
// Number of input and output matrices.
//
int n_mats;
// Host pointers to the input and output arrays, and to a CPU-computed
// output array used for checking results.
//
Elt_Type *h_a, *h_b, *h_out, *h_out_check;
//
// Note: h_a and h_b each point to an array holding n_mats N-by-N matrices,
// so each of those arrays has n_mats * N * N elements in total.
// GPU pointers to the input and output arrays.
//
Elt_Type *d_a, *d_b, *d_out;
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
typedef void (*KPtr)(Elt_Type *dout);
extern "C" __global__ void
mxm_volk(Elt_Type* __restrict__ dout)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
// Convenience function for finding index of the element at row r,
// column c, in matrix i.
//
auto idx = [](int i,int r,int c) { return i * N*N + r * N + c; };
const int start = tid / N;
const int stop = d_app.n_mats;
const int inc = num_threads / N;
// Chunk Size: Number of columns of matrix A to load into shared memory.
//
const int CS = 32 / sizeof(Elt_Type);
// Column in matrix B assigned to this thread.
//
const int cb = threadIdx.x % N;
// Column offset to load when populating shared memory.
//
const int c0 = threadIdx.x % CS;
// First row to load when populating shared memory.
//
const int r0 = ( threadIdx.x % N ) / CS;
//
//
const int h0 = threadIdx.x / N;
// Number of times per matrix that shared memory will have to be loaded.
//
const int RS = N / CS;
const int MpB = 32 * 32 / N; // Matrices per block.
// Storage for buffering N by CS submatrix of matrix A.
//
__shared__ Elt_Type mat_a[N][MpB][CS];
for ( int h=start; h<stop; h += inc )
{
// Storage for column of output matrix.
//
Elt_Type elt[N];
for ( auto& e: elt ) e = 0;
for ( int cc=0; cc<N; cc += CS )
{
// Write shared memory with an N by CS submatrix of A.
//
for ( int rr = 0; rr<N; rr += RS )
mat_a[rr + r0][h0][c0] =
d_app.d_a[ idx( h, rr + r0, cc + c0 ) ];
if ( N > 32 ) __syncthreads();
for ( int rb=0; rb<CS; rb++ )
{
const int r = cc + rb; // Row in matrix B, column in mat A.
Elt_Type elt_rb_cb = d_app.d_b[ idx( h, r, cb ) ];
for ( int ra=0; ra<N; ra++ )
elt[ra] += mat_a[ra][h0][rb] * elt_rb_cb;
}
if ( N > 32 ) __syncthreads();
}
for ( int r=0; r<N; r++ )
dout[ idx( h, r, cb ) ] = elt[r];
}
}
template<int thd_p_col = 2 >
__global__ void
mxm_tpc(Elt_Type* __restrict__ dout)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
// Convenience function for finding index of the element at row r,
// column c, in matrix i.
//
auto idx = [](int i,int r,int c) { return i * N*N + r * N + c; };
/// SOLUTION
const int thd_p_mat = N * thd_p_col;
const int start = tid / thd_p_mat;
const int stop = d_app.n_mats;
const int inc = num_threads / thd_p_mat;
// Chunk Size: Number of columns of matrix A to load into shared memory.
//
const int CS = 32 / sizeof(Elt_Type);
// Column in matrix B assigned to this thread.
//
const int cb = ( threadIdx.x / thd_p_col ) % N;
// Column offset to load when populating shared memory.
//
const int c0 = threadIdx.x % CS;
// First row to load when populating shared memory.
//
const int r0 = ( threadIdx.x % thd_p_mat ) / CS;
//
const int h0 = threadIdx.x / thd_p_mat;
// Number of times per matrix that shared memory will have to be loaded.
//
const int RS = thd_p_mat / CS;
const int MpB = 32 * 32 / thd_p_mat; // Matrices per block.
const int ra0 = threadIdx.x % thd_p_col;
// Storage for buffering N by CS submatrix of matrix A.
//
__shared__ Elt_Type mat_a[MpB][N][CS];
for ( int h=start; h<stop; h += inc )
{
// Storage for N/thd_p_col rows of a column of output matrix.
//
Elt_Type elt[N/thd_p_col];
for ( auto& e: elt ) e = 0;
for ( int cc=0; cc<N; cc += CS )
{
// Write shared memory with an N by CS submatrix of A.
//
if ( RS <= N || r0 < N )
for ( int rr = 0; rr<N; rr += RS )
mat_a[h0][rr + r0][c0] = d_app.d_a[ idx( h, rr + r0, cc + c0 ) ];
if ( thd_p_mat > 32 ) __syncthreads();
for ( int rb=0; rb<CS; rb++ )
{
const int r = cc + rb; // Row in matrix B, column in mat A.
Elt_Type elt_rb_cb = d_app.d_b[ idx( h, r, cb ) ];
for ( int ra=0; ra<N; ra += thd_p_col )
elt[ra/thd_p_col] += mat_a[h0][ra0+ra][rb] * elt_rb_cb;
}
if ( thd_p_mat > 32 ) __syncthreads();
}
for ( int r=0; r<N; r += thd_p_col )
dout[ idx( h, ra0+r, cb ) ] = elt[r/thd_p_col];
}
}
template<int tpc> bool mxm_tpc_block_size_okay(int bsize)
{
// Return true if mxm_tpc can run for a block size of bsize threads.
//
const int thd_p_mat = N * tpc;
return thd_p_mat <= bsize && bsize % thd_p_mat == 0;
};
GPU_Info
print_gpu_and_kernel_info()
{
GPU_Info info;
gpu_info_print();
// Determine which GPU to use. (For starters, if there's more than
// one, choose the one connected to the display.)
//
int dev = gpu_choose_index();
CE(cudaSetDevice(dev));
printf("Using GPU %d\n",dev);
info.get_gpu_info(dev);
/// Add kernels to the list of kernels to run.
//
info.GET_INFO(mxm_volk);
info.GET_INFO(mxm_tpc<1>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<1>;
info.GET_INFO(mxm_tpc<2>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<2>;
info.GET_INFO(mxm_tpc<4>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<4>;
info.GET_INFO(mxm_tpc<8>)
.block_size_okay_user_func = mxm_tpc_block_size_okay<8>;
// Print information about kernel.
//
printf("\nCUDA Kernel Resource Usage:\n");
for ( int i=0; i<info.num_kernels; i++ )
{
printf("For %s:\n", info.ki[i].name);
printf(" %6zd shared, %zd const, %zd loc, %d regs; "
"%d max threads per block.\n",
info.ki[i].cfa.sharedSizeBytes,
info.ki[i].cfa.constSizeBytes,
info.ki[i].cfa.localSizeBytes,
info.ki[i].cfa.numRegs,
info.ki[i].cfa.maxThreadsPerBlock);
}
return info;
}
int
main(int argc, char **argv)
{
const bool debug = false;
// Initialize the collection of metrics if GPU debugging is
// off. Metrics collected using the NPerf library which itself uses
// the NVIDIA CUPTI API to collect data from GPU performance (event)
// counters and to compute performance metrics in terms of the event
// data.
//
# ifdef __P_CUDA_DEBUG__
NPerf_init(false); //
# else
NPerf_init(true);
# endif
// Get info about GPU and each kernel.
//
GPU_Info info = print_gpu_and_kernel_info();
/// Indicate which metrics to collect.
//
// See the CUPTI User's Guide for a list of metrics, Section 1.6
// for the February 2016 guide.
//
NPerf_metric_collect("inst_executed");
NPerf_metric_collect("eligible_warps_per_cycle");
NPerf_metric_collect("gld_efficiency");
NPerf_metric_collect("gst_efficiency");
NPerf_metric_collect("gld_throughput");
NPerf_metric_collect("gst_throughput");
NPerf_metric_collect("shared_load_transactions_per_request");
NPerf_metric_collect("shared_store_transactions_per_request");
//
// Note: The more metrics that are collected, the more times a
// kernel will need to be run and the longer you'll have to wait for
// an answer.
const int num_mp = info.cuda_prop.multiProcessorCount;
const int nsq = N * N;
// Examine argument 1, block count, default is number of MPs.
// Negative value is a multiple of the number of MPs.
//
const int arg1_int = argc < 2 ? num_mp : atoi(argv[1]);
const int num_blocks =
arg1_int == 0 ? num_mp :
arg1_int < 0 ? -arg1_int * num_mp : arg1_int;
// Examine argument 2, number of threads per block.
// Zero means vary the number of threads in multiples of 32.
//
const int thd_per_block_arg = argc < 3 ? 0 : atoi(argv[2]);
const int thd_per_block_goal =
thd_per_block_arg == 0 ? 1024 : thd_per_block_arg;
const int num_threads = num_blocks * thd_per_block_goal;
const bool vary_warps = thd_per_block_arg == 0;
// Examine argument 3, size of array in MiB. Fractional values okay.
//
const int in_size_bytes_targ =
argc < 4 ? 1 << 24 : int( atof(argv[3]) * (1<<20) );
app.n_mats = in_size_bytes_targ / ( 2 * nsq * sizeof(app.h_a[0]) );
if ( num_threads <= 0 || app.n_mats <= 0 )
{
printf("Usage: %s [ NUM_CUDA_BLOCKS ] [THD_PER_BLOCK] "
"[DATA_SIZE_MiB]\n",
argv[0]);
exit(1);
}
const size_t a_size_elts = size_t(app.n_mats) * nsq;
const size_t a_size_bytes = a_size_elts * sizeof( app.h_a[0] );
const size_t out_size_elts = size_t(app.n_mats) * nsq;
const size_t out_size_bytes = out_size_elts * sizeof( app.h_out[0] );
// Amount of extra storage to allocate so that kernels can safely
// access data beyond the end of the input, avoiding the need for
// if statements.
//
const int overrun_size_elts = thd_per_block_goal * nsq;
const int overrun_size_bytes = overrun_size_elts * sizeof( app.h_out[0] );
// Allocate storage for CPU copy of data.
//
app.h_a = new Elt_Type[ a_size_elts ];
app.h_b = new Elt_Type[ a_size_elts ];
app.h_out = new Elt_Type[ out_size_elts ];
app.h_out_check = new Elt_Type[ out_size_elts ];
// Allocate storage for GPU copy of data.
//
CE( cudaMalloc( &app.d_a, a_size_bytes + overrun_size_bytes ) );
CE( cudaMalloc( &app.d_b, a_size_bytes + overrun_size_bytes ) );
CE( cudaMalloc( &app.d_out, out_size_bytes + overrun_size_bytes ) );
const size_t in_size_bytes = 2 * a_size_bytes;
printf
("Input is %d pairs of %d x %d matrices of %s,\n",
app.n_mats, N, N, elt_type_str(Elt_Type(1)));
printf
(" total size %zd bytes (%.1f MiB).\n",
in_size_bytes, double(in_size_bytes)/(size_t(1)<<20));
// Define a convenience function that computes the index for the
// element at row r, column c in matrix i.
//
auto idx = [&](int i,int r,int c) { return i * nsq + r * N + c; };
// Initialize input array.
//
for ( int i=0; i<app.n_mats; i++ )
for ( int r=0; r<N; r++ )
for ( int c=0; c<N; c++ )
{
app.h_a[ idx(i,r,c) ] = debug ? Elt_Type(c) : drand48();
app.h_b[ idx(i,r,c) ] = debug ? Elt_Type(r) : drand48();
}
// Compute correct answer.
//
for ( int i=0; i<app.n_mats; i++ )
for ( int r=0; r<N; r++ )
for ( int c=0; c<N; c++ )
{
app.h_out_check[idx(i,r,c)] = 0;
for ( int k=0; k<N; k++ )
app.h_out_check[idx(i,r,c)] +=
app.h_a[idx(i,r,k)] * app.h_b[idx(i,k,c)];
}
// Compute the total number of MADD operations.
//
const int64_t num_madds = int64_t(N) * nsq * app.n_mats;
// Amount of data in and out of GPU chip.
//
const int64_t amt_data_bytes = in_size_bytes + out_size_bytes;
// Prepare events used for timing.
//
cudaEvent_t gpu_start_ce, gpu_stop_ce;
CE(cudaEventCreate(&gpu_start_ce));
CE(cudaEventCreate(&gpu_stop_ce));
// Copy input array from CPU to GPU.
//
CE( cudaMemcpy
( app.d_a, app.h_a, a_size_bytes, cudaMemcpyHostToDevice ) );
CE( cudaMemcpy
( app.d_b, app.h_b, a_size_bytes, cudaMemcpyHostToDevice ) );
// Copy App structure to GPU.
//
CE( cudaMemcpyToSymbol
( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) );
// Launch kernel multiple times and keep track of the best time.
printf("Launching %d blocks of %d threads with %.2f matrices per thread.\n",
num_blocks, thd_per_block_goal,
double(app.n_mats) * N / ( num_blocks * thd_per_block_goal ));
#ifdef __P_CUDA_DEBUG__
printf("*** THIS VERSION compiled with CUDA debugging on. Will be slow.\n");
#endif
auto check = [&]()
{
int err_count = 0;
for ( int i=0; i<app.n_mats; i++ )
for ( int r=0; r<N; r++ )
for ( int c=0; c<N; c++ )
{
const int ei = idx(i,r,c);
if ( fabs( app.h_out_check[ei] - app.h_out[ei] ) > 1e-5 )
{
err_count++;
if ( err_count < 5 )
printf
("Error at mat %d elt %d,%d: "
"%.7f != %.7f (correct)\n",
i, r, c, app.h_out[ei], app.h_out_check[ei] );
}
}
if ( err_count )
printf("Total errors %d\n", err_count);
};
for ( int kernel = 0; kernel < info.num_kernels; kernel++ )
{
Kernel_Info& k = info.ki[kernel];
bool heading_printed = false;
cudaFuncAttributes& cfa = k.cfa;
const int wp_limit = cfa.maxThreadsPerBlock >> 5;
const int thd_limit = wp_limit << 5;
const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit);
const int wp_start = 4;
const int wp_stop = vary_warps ? 32 : wp_start;
const int wp_inc = 4;
for ( int wp_cnt = wp_start; wp_cnt <= wp_stop; wp_cnt += wp_inc )
{
const int thd_per_block =
vary_warps ? wp_cnt << 5 : thd_per_block_no_vary;
if ( ! k.block_size_okay(thd_per_block) ) continue;
// Zero the output array.
//
CE(cudaMemset(app.d_out,0,out_size_bytes));
// Measure execution time starting "now", which is after the data
// has been sent to the GPU. This is only used when NPerf is not active.
//
CE(cudaEventRecord(gpu_start_ce,0));
// Launch Kernel
//
for ( NPerf_data_reset(); NPerf_need_run_get(); )
KPtr(info.ki[kernel].func_ptr)<<<num_blocks,thd_per_block>>>
(app.d_out);
// Stop measuring execution time now, which is before data is
// returned from the GPU.
//
CE(cudaEventRecord(gpu_stop_ce,0));
CE(cudaEventSynchronize(gpu_stop_ce));
float cuda_time_ms = -1.1;
CE(cudaEventElapsedTime
(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
const double this_elapsed_time_s =
NPerf_metrics_collection_get()
? NPerf_kernel_et_get() : cuda_time_ms * 0.001;
const double thpt_compute_gflops =
num_madds / this_elapsed_time_s * 1e-9;
const double thpt_data_gbps =
amt_data_bytes / this_elapsed_time_s * 1e-9;
if ( vary_warps )
{
const int rate =
sizeof(Elt_Type) == sizeof(float)
? info.chip_sp_flops : info.chip_dp_flops;
const double comp_frac __attribute__((unused)) =
1e9 * thpt_compute_gflops / rate;
const double comm_frac =
1e9 * thpt_data_gbps / info.chip_bw_Bps;
const int max_st_len = 52;
// Number of warps, rounded up.
//
const int num_wps = ( thd_per_block + 31 ) >> 5;
// The maximum number of active blocks per MP for this
// kernel when launched with a block size of thd_per_block.
//
const int max_bl_per_mp =
info.get_max_active_blocks_per_mp(kernel,thd_per_block);
// Compute number of blocks available per MP based only on
// the number of blocks. This may be larger than the
// number of blocks that can run.
//
const int bl_per_mp_available =
0.999 + double(num_blocks) / num_mp;
// The number of active blocks is the minimum of what
// can fit and how many are available.
//
const int bl_per_mp =
min( bl_per_mp_available, max_bl_per_mp );
// Based on the number of blocks, compute the number of warps.
//
const int act_wps = num_wps * bl_per_mp;
if ( !heading_printed )
printf("Kernel %s:\n", info.ki[kernel].name);
heading_printed = true;
printf("%2d %2d wp %6.0f µs %4.0f GF %4.0f GB/s %s\n",
num_wps, act_wps,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
string(int(comm_frac*max_st_len),'=').c_str());
printf("%2d %2d wp %6.0f µs %4.0f GF %4.0f GB/s "
"%5.2f I/F %4.1f wp/c %3.0f%% %4.1f %4.1f\n",
num_wps, act_wps,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
NPerf_metric_value_get("inst_executed") * 32 / num_madds,
NPerf_metric_value_get("eligible_warps_per_cycle"),
NPerf_metric_value_get("gld_efficiency"),
NPerf_metric_value_get("shared_store_transactions_per_request"),
NPerf_metric_value_get("shared_load_transactions_per_request"));
} else {
printf
("%-10s %2d wp %7.0f µs %4.0f GF %4.0f (%4.0f) GB/s "
"%5.2f I/F %5.1f%% %4.1f %4.1f\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
( NPerf_metric_value_get("gld_throughput") +
NPerf_metric_value_get("gst_throughput") ) * 1e-9,
NPerf_metric_value_get("inst_executed") * 32 / num_madds,
// NPerf_metric_value_get("gld_efficiency"),
// NPerf_metric_value_get("gst_efficiency"),
NPerf_metric_value_get("eligible_warps_per_cycle"),
NPerf_metric_value_get("shared_load_transactions_per_request"),
NPerf_metric_value_get("shared_store_transactions_per_request")
);
if (0 )
printf("K %-15s %2d wp %11.3f µs %8.3f GFLOPS %8.3f GB/s\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps);
}
// Copy output array from GPU to CPU.
//
CE( cudaMemcpy
( app.h_out, app.d_out, out_size_bytes,
cudaMemcpyDeviceToHost) );
check();
}
}
}
|
c57c5a78ff13166683867708d48fbece0da50957.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int *r_p,
int *radix,
int *l_p,
int right,
int left
)
{
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
//insert partition left table in shared memory
__shared__ TUPLE sub_lt[JOIN_SHARED];
for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<JOIN_SHARED){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
int temp=0;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x];
int count_x_temp = 0;
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp){
count_x_temp++;
}
}
}
count[x] = count_x_temp;
if(x == gridDim.x*blockDim.x-1){
count[x+1] = 0;
}
}
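// count() is the first half of a two-pass partitioned join: every thread
// tallies how many (l, r) matches it will emit, and the last thread zeroes
// one extra slot so count[] can be turned into per-thread output offsets.
// Between the two launches the host is expected to prefix-sum count[];
// join() then uses count[x] as thread x's write offset into jt. A plausible
// host-side driver (assumed, not part of this file; n = total threads
// launched for count()):
//
//   count<<<grid, block>>>(lt, rt, d_count, r_p, radix, l_p, right, left);
//   thrust::exclusive_scan(thrust::device, d_count, d_count + n + 1, d_count);
//   join<<<grid, block>>>(lt, rt, jt, d_count, r_p, radix, l_p, right, left);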
__global__ void join(
TUPLE *lt,
TUPLE *rt,
RESULT *jt,
int *count,
int *r_p,
int *radix,
int *l_p,
int right,
int left
)
{
//int x = blockIdx.x*blockDim.x + threadIdx.x;
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
__shared__ TUPLE sub_lt[JOIN_SHARED];
for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<JOIN_SHARED){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
TUPLE temp;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x];
int tcount=count[x];
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp.key = rt[k].key;
temp.val = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp.val){
jt[tcount].rkey = temp.key;
jt[tcount].rval = temp.val;
jt[tcount].lkey = sub_lt[i].key;
jt[tcount].lval = sub_lt[i].val;
tcount++;
}
}
}
}
}
|
c57c5a78ff13166683867708d48fbece0da50957.cu
|
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int *r_p,
int *radix,
int *l_p,
int right,
int left
)
{
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
//insert partition left table in shared memory
__shared__ TUPLE sub_lt[JOIN_SHARED];
for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<JOIN_SHARED){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
int temp=0;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x];
int count_x_temp = 0;
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp){
count_x_temp++;
}
}
}
count[x] = count_x_temp;
if(x == gridDim.x*blockDim.x-1){
count[x+1] = 0;
}
}
__global__ void join(
TUPLE *lt,
TUPLE *rt,
RESULT *jt,
int *count,
int *r_p,
int *radix,
int *l_p,
int right,
int left
)
{
//int x = blockIdx.x*blockDim.x + threadIdx.x;
int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x;
__shared__ TUPLE sub_lt[JOIN_SHARED];
for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<JOIN_SHARED){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
__syncthreads();
TUPLE temp;
int temp2 = r_p[radix[blockIdx.x]+1];
int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x];
int tcount=count[x];
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){
temp.key = rt[k].key;
temp.val = rt[k].val;
for(int i=0; i<temp3 ;i++){
if(sub_lt[i].val == temp.val){
jt[tcount].rkey = temp.key;
jt[tcount].rval = temp.val;
jt[tcount].lkey = sub_lt[i].key;
jt[tcount].lval = sub_lt[i].val;
tcount++;
}
}
}
}
}
|
3245f389d03a1da9a9a615840984a29e3359f60a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <png++/image.hpp>
#include <png++/rgb_pixel.hpp>
#include <sys/times.h>
#include <iostream>
#define MERET 600
#define ITER_HAT 32000
__device__ int
mandel (int k, int j)
{
float a = -2.0, b = .7, c = -1.35, d = 1.35;
int szelesseg = MERET, magassag = MERET, iteraciosHatar = ITER_HAT;
float dx = (b - a) / szelesseg;
float dy = (d - c) / magassag;
float reC, imC, reZ, imZ, ujreZ, ujimZ;
int iteracio = 0;
reC = a + k * dx;
imC = d - j * dy;
reZ = 0.0;
imZ = 0.0;
iteracio = 0;
while (reZ * reZ + imZ * imZ < 4 && iteracio < iteraciosHatar)
{
ujreZ = reZ * reZ - imZ * imZ + reC;
ujimZ = 2 * reZ * imZ + imC;
reZ = ujreZ;
imZ = ujimZ;
++iteracio;
}
return iteracio;
}
/*
__global__ void
mandelkernel (int *kepadat)
{
int j = blockIdx.x;
int k = blockIdx.y;
kepadat[j + k * MERET] = mandel (j, k);
}
*/
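// The commented-out kernel above uses one thread per pixel with a
// MERET x MERET grid of single-thread blocks. The version below instead maps
// a 10 x 10 thread block to a 10 x 10 pixel tile, so the 600 x 600 image is
// covered by a 60 x 60 grid with 100 threads per block, which keeps the SMs
// much better occupied.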
__global__ void
mandelkernel (int *kepadat)
{
int tj = threadIdx.x;
int tk = threadIdx.y;
int j = blockIdx.x * 10 + tj;
int k = blockIdx.y * 10 + tk;
kepadat[j + k * MERET] = mandel (j, k);
}
void
cudamandel (int kepadat[MERET][MERET])
{
int *device_kepadat;
hipMalloc ((void **) &device_kepadat, MERET * MERET * sizeof (int));
dim3 grid (MERET / 10, MERET / 10);
dim3 tgrid (10, 10);
hipLaunchKernelGGL(mandelkernel, dim3(grid), dim3(tgrid), 0, 0, device_kepadat);
hipMemcpy (kepadat, device_kepadat,
MERET * MERET * sizeof (int), hipMemcpyDeviceToHost);
hipFree (device_kepadat);
}
int
main (int argc, char *argv[])
{
clock_t delta = clock ();
struct tms tmsbuf1, tmsbuf2;
times (&tmsbuf1);
if (argc != 2)
{
std::cout << "Hasznalat: ./mandelpngc fajlnev";
return -1;
}
int kepadat[MERET][MERET];
cudamandel (kepadat);
png::image < png::rgb_pixel > kep (MERET, MERET);
for (int j = 0; j < MERET; ++j)
{
//sor = j;
for (int k = 0; k < MERET; ++k)
{
kep.set_pixel (k, j,
png::rgb_pixel (255 -
(255 * kepadat[j][k]) / ITER_HAT,
255 -
(255 * kepadat[j][k]) / ITER_HAT,
255 -
(255 * kepadat[j][k]) / ITER_HAT));
}
}
kep.write (argv[1]);
std::cout << argv[1] << " mentve" << std::endl;
times (&tmsbuf2);
std::cout << tmsbuf2.tms_utime - tmsbuf1.tms_utime
+ tmsbuf2.tms_stime - tmsbuf1.tms_stime << std::endl;
delta = clock () - delta;
std::cout << (float) delta / CLOCKS_PER_SEC << " sec" << std::endl;
}
|
3245f389d03a1da9a9a615840984a29e3359f60a.cu
|
#include <png++/image.hpp>
#include <png++/rgb_pixel.hpp>
#include <sys/times.h>
#include <iostream>
#define MERET 600
#define ITER_HAT 32000
__device__ int
mandel (int k, int j)
{
float a = -2.0, b = .7, c = -1.35, d = 1.35;
int szelesseg = MERET, magassag = MERET, iteraciosHatar = ITER_HAT;
float dx = (b - a) / szelesseg;
float dy = (d - c) / magassag;
float reC, imC, reZ, imZ, ujreZ, ujimZ;
int iteracio = 0;
reC = a + k * dx;
imC = d - j * dy;
reZ = 0.0;
imZ = 0.0;
iteracio = 0;
while (reZ * reZ + imZ * imZ < 4 && iteracio < iteraciosHatar)
{
ujreZ = reZ * reZ - imZ * imZ + reC;
ujimZ = 2 * reZ * imZ + imC;
reZ = ujreZ;
imZ = ujimZ;
++iteracio;
}
return iteracio;
}
/*
__global__ void
mandelkernel (int *kepadat)
{
int j = blockIdx.x;
int k = blockIdx.y;
kepadat[j + k * MERET] = mandel (j, k);
}
*/
__global__ void
mandelkernel (int *kepadat)
{
int tj = threadIdx.x;
int tk = threadIdx.y;
int j = blockIdx.x * 10 + tj;
int k = blockIdx.y * 10 + tk;
kepadat[j + k * MERET] = mandel (j, k);
}
void
cudamandel (int kepadat[MERET][MERET])
{
int *device_kepadat;
cudaMalloc ((void **) &device_kepadat, MERET * MERET * sizeof (int));
dim3 grid (MERET / 10, MERET / 10);
dim3 tgrid (10, 10);
mandelkernel <<< grid, tgrid >>> (device_kepadat);
cudaMemcpy (kepadat, device_kepadat,
MERET * MERET * sizeof (int), cudaMemcpyDeviceToHost);
cudaFree (device_kepadat);
}
int
main (int argc, char *argv[])
{
clock_t delta = clock ();
struct tms tmsbuf1, tmsbuf2;
times (&tmsbuf1);
if (argc != 2)
{
std::cout << "Hasznalat: ./mandelpngc fajlnev";
return -1;
}
int kepadat[MERET][MERET];
cudamandel (kepadat);
png::image < png::rgb_pixel > kep (MERET, MERET);
for (int j = 0; j < MERET; ++j)
{
//sor = j;
for (int k = 0; k < MERET; ++k)
{
kep.set_pixel (k, j,
png::rgb_pixel (255 -
(255 * kepadat[j][k]) / ITER_HAT,
255 -
(255 * kepadat[j][k]) / ITER_HAT,
255 -
(255 * kepadat[j][k]) / ITER_HAT));
}
}
kep.write (argv[1]);
std::cout << argv[1] << " mentve" << std::endl;
times (&tmsbuf2);
std::cout << tmsbuf2.tms_utime - tmsbuf1.tms_utime
+ tmsbuf2.tms_stime - tmsbuf1.tms_stime << std::endl;
delta = clock () - delta;
std::cout << (float) delta / CLOCKS_PER_SEC << " sec" << std::endl;
}
|
bc2ef58b0dbeb88f2a1a2853377d36c1717b4154.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h> // srand, rand
#include <time.h> // time
#include <vector>
#include <stdio.h>
// GLEW
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
// CUDA 8.0, only for test now
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// PBF
#include "include/config.h"
#include "include/constants.h"
#include "include/arcball_camera.h"
#include "include/gl_fix.h"
#include "include/point_drawer.h"
#include "include/renderer.h"
#include "include/shader_wrapper.h"
#include "include/shared_math.h"
#include "include/boundary_gpu.h"
#include "include/particle_system.h"
#include "include/pbf_solver.h"
#include "include/pbf_solver_gpu.h"
#include "include/spatial_hash.h"
////////////////////////////////////////////////////
// Window dimensions
const GLuint WIDTH = 1024, HEIGHT = 768;
// const GLuint WIDTH = 400, HEIGHT = 300;
float delta_time = 0.0f;
glm::vec3 world_size_dim{ 0.0f };
// Camera instance
pbf::ArcballCamera camera;
// Particle System instance
pbf::ParticleSystem ps;
pbf::BoundaryConstraintGpu boundary_constraint;
// PBF Solver instance
// pbf::PbfSolver solver;
pbf::PbfSolverGpu solver;
// SceneRender instance
pbf::SceneRenderer render;
////////////////////////////////////////////////////
// Configure the parameters of the world
void Configure(pbf::Config& config);
void InitParticles(const pbf::Config& config);
void InitDependencies();
////////////////////////////////////////////////////
// Callback function declarations
bool is_paused = false;
void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mode);
bool left_btn_pressed = false;
void MouseCallback(GLFWwindow* window, double xpos, double ypos);
float max_arcball_radius = 100.0f;
void ScrollCallback(GLFWwindow* window, double xoffset, double yoffset);
////////////////////////////////////////////////////
// A class that moves the x hi boundary back and forth
class MoveXBoundaryDriver {
public:
MoveXBoundaryDriver(pbf::BoundaryConstraintBase* bc) : bc_(bc) {}
void Configure(const pbf::Config& config) {
x_hi_index_ = 1;
x_vel_ = 8.0f;
const float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X);
x_lo_ = world_size_x * 0.6f;
x_hi_ = world_size_x - 0.5f;
}
void Update(float dt) {
auto* bp = bc_->Get(x_hi_index_);
bp->position.x += (bp->velocity.x * dt);
if (bp->position.x < x_lo_) {
bp->position.x = x_lo_ + kFloatEpsilon;
bp->velocity.x = x_vel_;
}
else if (bp->position.x > x_hi_) {
bp->position.x = x_hi_ - kFloatEpsilon;
bp->velocity.x = -x_vel_;
}
}
private:
pbf::BoundaryConstraintBase* bc_;
float x_vel_;
float x_lo_;
float x_hi_;
size_t x_hi_index_;
};
////////////////////////////////////////////////////
// The MAIN function, from here we start the application and run the game loop
int main() {
std::cout << "Starting GLFW context, OpenGL 3.3" << std::endl;
// Init GLFW
glfwInit();
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFW_FORWARD_COMPATIBLE();
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, "PBF", nullptr, nullptr);
glfwMakeContextCurrent(window);
// Initialize PBF
pbf::Config config;
config.Load("Config/config.txt");
Configure(config);
InitParticles(config);
InitDependencies();
MoveXBoundaryDriver boundary_driver{ &boundary_constraint };
boundary_driver.Configure(config);
// Set the required callback functions
glfwSetKeyCallback(window, KeyCallback);
glfwSetCursorPosCallback(window, MouseCallback);
glfwSetScrollCallback(window, ScrollCallback);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
glewInit();
// Define the viewport dimensions
int width, height;
glfwGetFramebufferSize(window, &width, &height);
glViewport(0, 0, width, height);
render.InitShaders("Shaders/vertex.vert", "Shaders/fragment.frag");
render.InitSpriteShaders("Shaders/sprite_vertex.vert", "Shaders/sprite_fragment.frag");
render.InitScene();
// Game loop
while (!glfwWindowShouldClose(window)) {
// Check if any events have been activated (key pressed, mouse moved, etc.)
// and call the corresponding response functions.
glfwPollEvents();
if (!is_paused) {
boundary_driver.Update(delta_time);
solver.Update(delta_time);
}
render.Render();
// Swap the screen buffers
glfwSwapBuffers(window);
}
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
////////////////////////////////////////////////////
void ConfigureCamera(const pbf::Config& config) {
// config camera
camera.SetStageSize(WIDTH, HEIGHT);
float radius = config.Get<float>(pbf::INIT_ARCBALL_RADIUS);
camera.SetArcballRadius(radius);
float sensitivity = 2.0f;
config.GetOptional(pbf::CAMERA_SENSITIVITY, &sensitivity);
camera.SetSensitivity(sensitivity);
max_arcball_radius = config.Get<float>(pbf::MAX_ARCBALL_RADIUS);
}
void ConfigureBoundaryConstraint(const pbf::Config& config) {
using pbf::vec_t;
const float world_size_x = world_size_dim.x;
const float world_size_y = world_size_dim.y;
const float world_size_z = world_size_dim.z;
pbf::BoundaryPlane bp;
// X lo
bp.position = vec_t{ 0.0f, 0.0f, 0.0f };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 1.0f, 0.0f, 0.0f };
boundary_constraint.Add(bp);
// X hi
bp.position = vec_t{ world_size_x, 0.0f, world_size_z };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ -1.0f, 0.0f, 0.0f };
boundary_constraint.Add(bp);
// Z lo
bp.position = vec_t{ world_size_x, 0.0f, 0.0f };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 0.0f, 0.0f, 1.0f };
boundary_constraint.Add(bp);
// Z hi
bp.position = vec_t{ 0.0f, 0.0f, world_size_z };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 0.0f, 0.0f, -1.0f };
boundary_constraint.Add(bp);
// Y lo
bp.position = vec_t{ world_size_x, 0.0f, 0.0f };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 0.0f, 1.0f, 0.0f };
boundary_constraint.Add(bp);
// No Y hi, top not covered
}
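// Note on the planes above: each BoundaryPlane is a point on the plane plus an
// inward-facing normal. A minimal sketch of how such a constraint is typically
// evaluated (SignedDistance is a hypothetical helper, not part of the pbf API):
//
//   float SignedDistance(const pbf::BoundaryPlane& bp, const pbf::vec_t& p) {
//     return glm::dot(bp.normal, p - bp.position);  // >= 0 means inside the box
//   }
//
// A particle with a negative signed distance has crossed the wall, and a
// position-based solver would project it back onto the plane.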
void ConfigureSolver(const pbf::Config& config) {
pbf::PbfSolverConfig solver_config;
solver_config.h = config.Get<float>(pbf::H_KERNEL);
solver_config.mass = config.Get<float>(pbf::PARTICLE_MASS);
solver_config.rho_0 = config.Get<float>(pbf::RHO_0);
solver_config.epsilon = config.Get<float>(pbf::EPSILON);
solver_config.num_iters = config.Get<unsigned>(pbf::NUM_ITERATIONS);
solver_config.corr_delta_q_coeff = config.Get<float>(pbf::CORR_DELTA_Q_COEFF);
solver_config.corr_k = config.Get<float>(pbf::CORR_K);
solver_config.corr_n = config.Get<unsigned>(pbf::CORR_N);
solver_config.vorticity_epsilon = config.Get<float>(pbf::VORTICITY_EPSILON);
solver_config.xsph_c = config.Get<float>(pbf::XSPH_C);
solver_config.world_size_x = world_size_dim.x;
solver_config.world_size_y = world_size_dim.y;
solver_config.world_size_z = world_size_dim.z;
solver_config.spatial_hash_cell_size = config.Get<float>(pbf::SH_CELL_SIZE);
solver.Configure(solver_config);
}
void ConfigureRenderer(const pbf::Config& config) {
render.SetWorldSize(world_size_dim);
float fov = 45.0f;
config.GetOptional(pbf::FOV, &fov);
float aspect = (float)WIDTH / (float)HEIGHT;
float near = 0.1f;
config.GetOptional(pbf::PROJECTION_NEAR, &near);
float far = config.Get<float>(pbf::PROJECTION_FAR);
render.SetPespectiveProjection(fov, aspect, near, far);
}
void Configure(pbf::Config& config) {
delta_time = config.Get<float>(pbf::DELTA_TIME);
float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X);
float world_size_y = config.Get<float>(pbf::WORLD_SIZE_Y);
float world_size_z = config.Get<float>(pbf::WORLD_SIZE_Z);
world_size_dim = { world_size_x, world_size_y, world_size_z };
ConfigureCamera(config);
ConfigureBoundaryConstraint(config);
ConfigureSolver(config);
ConfigureRenderer(config);
}
void InitParticles(const pbf::Config& config) {
srand(time(nullptr));
unsigned num_x = config.Get<unsigned>(pbf::NUM_PTCS_WIDTH);
unsigned num_z = config.Get<unsigned>(pbf::NUM_PTCS_HEIGHT);
unsigned num_y = config.Get<unsigned>(pbf::NUM_PTC_LAYERS);
float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X);
float world_size_y = config.Get<float>(pbf::WORLD_SIZE_Y);
float world_size_z = config.Get<float>(pbf::WORLD_SIZE_Z);
float interval = config.Get<float>(pbf::PARTICLE_INTERVAL);
auto ComputeMargin = [=](float world_sz_dim, unsigned num_dim) -> float {
return (world_sz_dim - ((num_dim - 1) * interval)) * 0.5f;
};
for (unsigned y = 0; y < num_y; ++y) {
float margin_y = ComputeMargin(world_size_y, num_y);
for (unsigned z = 0; z < num_z; ++z) {
float margin_z = ComputeMargin(world_size_z, num_z);
for (unsigned x = 0; x < num_x; ++x) {
float margin_x = ComputeMargin(world_size_x, num_x);
float xf = margin_x + x * interval;
float yf = world_size_y - margin_y - y * interval;
float zf = margin_z + z * interval;
const glm::vec3 pos{ xf, yf, zf };
float vx = pbf::GenRandom(-0.5f, 0.5f);
float vy = pbf::GenRandom(0.0f, 1.0f);
float vz = pbf::GenRandom(-0.5f, 0.5f);
const glm::vec3 vel{ vx, vy, vz };
ps.Add(pos, vel);
}
}
}
}
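// Worked example of ComputeMargin above (illustrative numbers): with
// world_size_x = 10, num_x = 5 and interval = 1, the margin is
// (10 - (5 - 1) * 1) * 0.5 = 3, so particles are placed at x = 3, 4, 5, 6, 7
// and the fluid block is centered along that axis; the same centering holds
// for y and z.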
void InitDependencies() {
solver.InitParticleSystems(&ps);
solver.SetBoundaryConstraint(&boundary_constraint);
render.SetCamera(&camera);
render.SetParticleSystem(&ps);
render.boundary_constraint_ = &boundary_constraint;
for (size_t i = 0; i < boundary_constraint.NumBoundaries(); ++i) {
pbf::SceneRenderer::BoundaryRecord brec;
brec.index = i;
if (i == 0 || i == 1) {
brec.v1_len = world_size_dim.z;
brec.v2_len = world_size_dim.y;
}
else if (i == 2 || i == 3) {
brec.v1_len = world_size_dim.x;
brec.v2_len = world_size_dim.y;
}
else {
brec.v1_len = world_size_dim.z;
brec.v2_len = world_size_dim.x;
}
render.boundary_records_.push_back(brec);
}
}
////////////////////////////////////////////////////
// Is called whenever a key is pressed/released via GLFW
void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mode) {
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
if (key == GLFW_KEY_SPACE && action == GLFW_PRESS)
is_paused = !is_paused;
}
void MouseCallback(GLFWwindow* window, double xpos, double ypos) {
int action = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT);
if (action == GLFW_PRESS) {
if (!left_btn_pressed) {
std::cout << "mouse left button just pressed" << std::endl;
left_btn_pressed = true;
camera.OnMouseLeftClick(xpos, ypos);
}
else {
std::cout << "mouse left button dragging" << std::endl;
camera.OnMouseLeftDragging(xpos, ypos);
}
}
else {
if (left_btn_pressed) {
left_btn_pressed = false;
camera.OnMouseLeftRelease(xpos, ypos);
std::cout << "mouse left button released" << std::endl;
}
}
}
void ScrollCallback(GLFWwindow* window, double xoffset, double yoffset) {
float arcball_radius = camera.GetArcballRadius();
arcball_radius += yoffset * 0.25f;
std::cout << "scroll! yoffset: " << yoffset << ", radius: " << arcball_radius << std::endl;
if (arcball_radius > 0 && arcball_radius < max_arcball_radius) {
camera.SetArcballRadius(arcball_radius);
}
}
|
bc2ef58b0dbeb88f2a1a2853377d36c1717b4154.cu
|
#include <iostream>
#include <stdlib.h> // srand, rand
#include <time.h> // time
#include <vector>
#include <stdio.h>
// GLEW
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
// CUDA 8.0, only for test now
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// PBF
#include "include/config.h"
#include "include/constants.h"
#include "include/arcball_camera.h"
#include "include/gl_fix.h"
#include "include/point_drawer.h"
#include "include/renderer.h"
#include "include/shader_wrapper.h"
#include "include/shared_math.h"
#include "include/boundary_gpu.h"
#include "include/particle_system.h"
#include "include/pbf_solver.h"
#include "include/pbf_solver_gpu.h"
#include "include/spatial_hash.h"
////////////////////////////////////////////////////
// Window dimensions
const GLuint WIDTH = 1024, HEIGHT = 768;
// const GLuint WIDTH = 400, HEIGHT = 300;
float delta_time = 0.0f;
glm::vec3 world_size_dim{ 0.0f };
// Camera instance
pbf::ArcballCamera camera;
// Particle System instance
pbf::ParticleSystem ps;
pbf::BoundaryConstraintGpu boundary_constraint;
// PBF Solver instance
// pbf::PbfSolver solver;
pbf::PbfSolverGpu solver;
// SceneRender instance
pbf::SceneRenderer render;
////////////////////////////////////////////////////
// Configure the parameters of the world
void Configure(pbf::Config& config);
void InitParticles(const pbf::Config& config);
void InitDependencies();
////////////////////////////////////////////////////
// Callback function declarations
bool is_paused = false;
void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mode);
bool left_btn_pressed = false;
void MouseCallback(GLFWwindow* window, double xpos, double ypos);
float max_arcball_radius = 100.0f;
void ScrollCallback(GLFWwindow* window, double xoffset, double yoffset);
////////////////////////////////////////////////////
// A class that moves the x hi boundary back and forth
class MoveXBoundaryDriver {
public:
MoveXBoundaryDriver(pbf::BoundaryConstraintBase* bc) : bc_(bc) {}
void Configure(const pbf::Config& config) {
x_hi_index_ = 1;
x_vel_ = 8.0f;
const float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X);
x_lo_ = world_size_x * 0.6f;
x_hi_ = world_size_x - 0.5f;
}
void Update(float dt) {
auto* bp = bc_->Get(x_hi_index_);
bp->position.x += (bp->velocity.x * dt);
if (bp->position.x < x_lo_) {
bp->position.x = x_lo_ + kFloatEpsilon;
bp->velocity.x = x_vel_;
}
else if (bp->position.x > x_hi_) {
bp->position.x = x_hi_ - kFloatEpsilon;
bp->velocity.x = -x_vel_;
}
}
private:
pbf::BoundaryConstraintBase* bc_;
float x_vel_;
float x_lo_;
float x_hi_;
size_t x_hi_index_;
};
////////////////////////////////////////////////////
// The MAIN function, from here we start the application and run the game loop
int main() {
std::cout << "Starting GLFW context, OpenGL 3.3" << std::endl;
// Init GLFW
glfwInit();
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFW_FORWARD_COMPATIBLE();
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, "PBF", nullptr, nullptr);
glfwMakeContextCurrent(window);
// Initialize PBF
pbf::Config config;
config.Load("Config/config.txt");
Configure(config);
InitParticles(config);
InitDependencies();
MoveXBoundaryDriver boundary_driver{ &boundary_constraint };
boundary_driver.Configure(config);
// Set the required callback functions
glfwSetKeyCallback(window, KeyCallback);
glfwSetCursorPosCallback(window, MouseCallback);
glfwSetScrollCallback(window, ScrollCallback);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
glewInit();
// Define the viewport dimensions
int width, height;
glfwGetFramebufferSize(window, &width, &height);
glViewport(0, 0, width, height);
render.InitShaders("Shaders/vertex.vert", "Shaders/fragment.frag");
render.InitSpriteShaders("Shaders/sprite_vertex.vert", "Shaders/sprite_fragment.frag");
render.InitScene();
// Game loop
while (!glfwWindowShouldClose(window)) {
// Check if any events have been activated (key pressed, mouse moved etc.)
// and call corresponding response functions
glfwPollEvents();
if (!is_paused) {
boundary_driver.Update(delta_time);
solver.Update(delta_time);
}
render.Render();
// Swap the screen buffers
glfwSwapBuffers(window);
}
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
////////////////////////////////////////////////////
void ConfigureCamera(const pbf::Config& config) {
// config camera
camera.SetStageSize(WIDTH, HEIGHT);
float radius = config.Get<float>(pbf::INIT_ARCBALL_RADIUS);
camera.SetArcballRadius(radius);
float sensitivity = 2.0f;
config.GetOptional(pbf::CAMERA_SENSITIVITY, &sensitivity);
camera.SetSensitivity(sensitivity);
max_arcball_radius = config.Get<float>(pbf::MAX_ARCBALL_RADIUS);
}
void ConfigureBoundaryConstraint(const pbf::Config& config) {
using pbf::vec_t;
const float world_size_x = world_size_dim.x;
const float world_size_y = world_size_dim.y;
const float world_size_z = world_size_dim.z;
pbf::BoundaryPlane bp;
// X lo
bp.position = vec_t{ 0.0f, 0.0f, 0.0f };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 1.0f, 0.0f, 0.0f };
boundary_constraint.Add(bp);
// X hi
bp.position = vec_t{ world_size_x, 0.0f, world_size_z };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ -1.0f, 0.0f, 0.0f };
boundary_constraint.Add(bp);
// Z lo
bp.position = vec_t{ world_size_x, 0.0f, 0.0f };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 0.0f, 0.0f, 1.0f };
boundary_constraint.Add(bp);
// Z hi
bp.position = vec_t{ 0.0f, 0.0f, world_size_z };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 0.0f, 0.0f, -1.0f };
boundary_constraint.Add(bp);
// Y lo
bp.position = vec_t{ world_size_x, 0.0f, 0.0f };
bp.velocity = vec_t{ 0.0f };
bp.normal = vec_t{ 0.0f, 1.0f, 0.0f };
boundary_constraint.Add(bp);
// No Y hi, top not covered
}
void ConfigureSolver(const pbf::Config& config) {
pbf::PbfSolverConfig solver_config;
solver_config.h = config.Get<float>(pbf::H_KERNEL);
solver_config.mass = config.Get<float>(pbf::PARTICLE_MASS);
solver_config.rho_0 = config.Get<float>(pbf::RHO_0);
solver_config.epsilon = config.Get<float>(pbf::EPSILON);
solver_config.num_iters = config.Get<unsigned>(pbf::NUM_ITERATIONS);
solver_config.corr_delta_q_coeff = config.Get<float>(pbf::CORR_DELTA_Q_COEFF);
solver_config.corr_k = config.Get<float>(pbf::CORR_K);
solver_config.corr_n = config.Get<unsigned>(pbf::CORR_N);
solver_config.vorticity_epsilon = config.Get<float>(pbf::VORTICITY_EPSILON);
solver_config.xsph_c = config.Get<float>(pbf::XSPH_C);
solver_config.world_size_x = world_size_dim.x;
solver_config.world_size_y = world_size_dim.y;
solver_config.world_size_z = world_size_dim.z;
solver_config.spatial_hash_cell_size = config.Get<float>(pbf::SH_CELL_SIZE);
solver.Configure(solver_config);
}
void ConfigureRenderer(const pbf::Config& config) {
render.SetWorldSize(world_size_dim);
float fov = 45.0f;
config.GetOptional(pbf::FOV, &fov);
float aspect = (float)WIDTH / (float)HEIGHT;
float near = 0.1f;
config.GetOptional(pbf::PROJECTION_NEAR, &near);
float far = config.Get<float>(pbf::PROJECTION_FAR);
render.SetPespectiveProjection(fov, aspect, near, far);
}
void Configure(pbf::Config& config) {
delta_time = config.Get<float>(pbf::DELTA_TIME);
float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X);
float world_size_y = config.Get<float>(pbf::WORLD_SIZE_Y);
float world_size_z = config.Get<float>(pbf::WORLD_SIZE_Z);
world_size_dim = { world_size_x, world_size_y, world_size_z };
ConfigureCamera(config);
ConfigureBoundaryConstraint(config);
ConfigureSolver(config);
ConfigureRenderer(config);
}
void InitParticles(const pbf::Config& config) {
srand(time(nullptr));
unsigned num_x = config.Get<unsigned>(pbf::NUM_PTCS_WIDTH);
unsigned num_z = config.Get<unsigned>(pbf::NUM_PTCS_HEIGHT);
unsigned num_y = config.Get<unsigned>(pbf::NUM_PTC_LAYERS);
float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X);
float world_size_y = config.Get<float>(pbf::WORLD_SIZE_Y);
float world_size_z = config.Get<float>(pbf::WORLD_SIZE_Z);
float interval = config.Get<float>(pbf::PARTICLE_INTERVAL);
auto ComputeMargin = [=](float world_sz_dim, unsigned num_dim) -> float {
return (world_sz_dim - ((num_dim - 1) * interval)) * 0.5f;
};
for (unsigned y = 0; y < num_y; ++y) {
float margin_y = ComputeMargin(world_size_y, num_y);
for (unsigned z = 0; z < num_z; ++z) {
float margin_z = ComputeMargin(world_size_z, num_z);
for (unsigned x = 0; x < num_x; ++x) {
float margin_x = ComputeMargin(world_size_x, num_x);
float xf = margin_x + x * interval;
float yf = world_size_y - margin_y - y * interval;
float zf = margin_z + z * interval;
const glm::vec3 pos{ xf, yf, zf };
float vx = pbf::GenRandom(-0.5f, 0.5f);
float vy = pbf::GenRandom(0.0f, 1.0f);
float vz = pbf::GenRandom(-0.5f, 0.5f);
const glm::vec3 vel{ vx, vy, vz };
ps.Add(pos, vel);
}
}
}
}
void InitDependencies() {
solver.InitParticleSystems(&ps);
solver.SetBoundaryConstraint(&boundary_constraint);
render.SetCamera(&camera);
render.SetParticleSystem(&ps);
render.boundary_constraint_ = &boundary_constraint;
for (size_t i = 0; i < boundary_constraint.NumBoundaries(); ++i) {
pbf::SceneRenderer::BoundaryRecord brec;
brec.index = i;
if (i == 0 || i == 1) {
brec.v1_len = world_size_dim.z;
brec.v2_len = world_size_dim.y;
}
else if (i == 2 || i == 3) {
brec.v1_len = world_size_dim.x;
brec.v2_len = world_size_dim.y;
}
else {
brec.v1_len = world_size_dim.z;
brec.v2_len = world_size_dim.x;
}
render.boundary_records_.push_back(brec);
}
}
////////////////////////////////////////////////////
// Is called whenever a key is pressed/released via GLFW
void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mode) {
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
if (key == GLFW_KEY_SPACE && action == GLFW_PRESS)
is_paused = !is_paused;
}
void MouseCallback(GLFWwindow* window, double xpos, double ypos) {
int action = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT);
if (action == GLFW_PRESS) {
if (!left_btn_pressed) {
std::cout << "mouse left button just pressed" << std::endl;
left_btn_pressed = true;
camera.OnMouseLeftClick(xpos, ypos);
}
else {
std::cout << "mouse left button dragging" << std::endl;
camera.OnMouseLeftDragging(xpos, ypos);
}
}
else {
if (left_btn_pressed) {
left_btn_pressed = false;
camera.OnMouseLeftRelease(xpos, ypos);
std::cout << "mouse left button released" << std::endl;
}
}
}
void ScrollCallback(GLFWwindow* window, double xoffset, double yoffset) {
float arcball_radius = camera.GetArcballRadius();
arcball_radius += yoffset * 0.25f;
std::cout << "scroll! yoffset: " << yoffset << ", radius: " << arcball_radius << std::endl;
if (arcball_radius > 0 && arcball_radius < max_arcball_radius) {
camera.SetArcballRadius(arcball_radius);
}
}
|
75608eca2259512fedba1830b73393b5e3fc357a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-07-29
* &Updated by: XIAO Tong (email: [email protected]) 2018-12-26
* Add summation by broadcasting.
*/
#include "SumDim.cuh"
#include "../../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
tensor summation of a tensor and a row vector
c = a + b * \beta
where a is a tensor and b is a row vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c
>> colNum - number of columns of a and c (i.e., the size of b)
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithRow(T * a, T * b, T * c, int rowNum, int colNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(col >= colNum || row >= rowNum)
return;
if(threadIdx.y == 0)
bv[threadIdx.x] = b[col];
__syncthreads();
int offset = colNum * row + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.x] * beta;
else
c[offset] = a[offset] + bv[threadIdx.x];
}
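/*
illustration only (not part of the library): the kernel above is equivalent to
the following host-side reference loop over a rowNum * colNum matrix a and a
row vector b of length colNum

    for (int row = 0; row < rowNum; row++)
        for (int col = 0; col < colNum; col++)
            c[row * colNum + col] = a[row * colNum + col]
                                  + (betaFired ? b[col] * beta : b[col]);

each thread handles one (row, col) element, and the shared array bv caches b[col]
so that a block reads b from shared memory instead of global memory
*/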
/*
tensor summation of a tensor and a column vector
c = a + b * \beta
where a is a tensor and b is a column vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c (i.e., the size of b)
>> colNum - number of columns of a and c
>> blockSize - size of a block (matrix), i.e., rowNum * colNum
>> blockNum - number of matrices
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithCol(T * a, T * b, T * c, int rowNum, int colNum, int blockSize, int blockNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int colIndex = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = colIndex % colNum;
int block = colIndex / colNum;
if(row >= rowNum || block >= blockNum)
return;
if(threadIdx.x == 0)
bv[threadIdx.y] = b[row];
__syncthreads();
int offset = block * blockSize + row * colNum + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.y] * beta;
else
c[offset] = a[offset] + bv[threadIdx.y];
}
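/*
illustration only (not part of the library): with blockNum matrices of size
rowNum * colNum stored back to back (so the blockSize argument equals
rowNum * colNum), the kernel above computes, for every matrix k,

    c[k * blockSize + row * colNum + col] =
        a[k * blockSize + row * colNum + col] + (betaFired ? b[row] * beta : b[row]);

i.e. the column vector b (length rowNum) is broadcast across the columns of
each matrix, with bv caching b[row] in shared memory
*/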
/*
tensor summation (cuda version)
c = a + b * \beta
where the size of b is equal to the n-th dimension of a,
i.e., a is summed with b by broadcasting
>> a - a tensor
>> b - another tensor whose size is equal to that of dimension n of a
>> c - where we put a+b*\beta. we save it in a if c is NULL
>> n - the dimension index
>> beta - the scaling factor
*/
void _CudaSumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE beta)
{
CheckNTErrors(a && b && c, "Empty tensor input!");
CheckNTErrors(a->unitNum == c->unitNum, "Unmatched tensors in addition!");
CheckNTErrors(a->dataType == b->dataType && a->dataType == c->dataType,
"Unmatched data types in addition!");
CheckNTErrors(a->order == c->order, "The input tensors do not have the same order in addition!");
CheckNTErrors(!a->isSparse && !b->isSparse && !c->isSparse, "Dense tensors are required!");
CheckNTErrors(a->dimSize[n] == b->unitNum, "Wrong tensor size!");
int stride = 1;
int blockSize = a->dimSize[n];
int blockNum = 1;
for(int i = a->order - 1; i >= 0; i--){
if(i > n)
stride *= a->dimSize[i];
else if(i < n)
blockNum *= a->dimSize[i];
}
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup = 0;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE){
if(stride > 1){
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
hipLaunchKernelGGL(( KernelAddWithCol<DTYPE, false>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
else
hipLaunchKernelGGL(( KernelAddWithCol<DTYPE, true>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
}
else if(stride == 1){
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
hipLaunchKernelGGL(( KernelAddWithRow<DTYPE, false>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
else
hipLaunchKernelGGL(( KernelAddWithRow<DTYPE, true>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
}
else{
ShowNTErrors("Something is wrong!");
}
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
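/*
worked example of the stride/blockSize/blockNum decomposition above
(illustrative shapes): for a tensor a of shape (2, 3, 4) and n = 1 we get
blockSize = dimSize[1] = 3, stride = dimSize[2] = 4 and blockNum = dimSize[0] = 2,
so KernelAddWithCol is launched (stride > 1); for n = a->order - 1, i.e. the
innermost dimension, stride stays 1 and KernelAddWithRow is used instead
*/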
#endif
} // namespace nts(NiuTrans.Tensor)
|
75608eca2259512fedba1830b73393b5e3fc357a.cu
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-07-29
* &Updated by: XIAO Tong (email: [email protected]) 2018-12-26
* Add summation by broadcasting.
*/
#include "SumDim.cuh"
#include "../../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
tensor summation of a tensor and a row vector
c = a + b * \beta
where a is a tensor and b is a row vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c
>> colNum - number of columns of a and c (i.e., the size of b)
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithRow(T * a, T * b, T * c, int rowNum, int colNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(col >= colNum || row >= rowNum)
return;
if(threadIdx.y == 0)
bv[threadIdx.x] = b[col];
__syncthreads();
int offset = colNum * row + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.x] * beta;
else
c[offset] = a[offset] + bv[threadIdx.x];
}
/*
tensor summation of a tensor and a column vector
c = a + b * \beta
where a is a tensor and b is a column vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c (i.e., the size of b)
>> colNum - number of columns of a and c
>> blockSize - size of a block (matrix), i.e., rowNum * colNum
>> blockNum - number of matrices
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithCol(T * a, T * b, T * c, int rowNum, int colNum, int blockSize, int blockNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int colIndex = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = colIndex % colNum;
int block = colIndex / colNum;
if(row >= rowNum || block >= blockNum)
return;
if(threadIdx.x == 0)
bv[threadIdx.y] = b[row];
__syncthreads();
int offset = block * blockSize + row * colNum + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.y] * beta;
else
c[offset] = a[offset] + bv[threadIdx.y];
}
/*
tensor summation (cuda version)
c = a + b * \beta
where the size of b is equal to the n-th dimension of a,
i.e., a is summed with b by broadcasting
>> a - a tensor
>> b - another tensor whose size is equal to that of dimension n of a
>> c - where we put a+b*\beta. we save it in a if c is NULL
>> n - the dimension index
>> beta - the scaling factor
*/
void _CudaSumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE beta)
{
CheckNTErrors(a && b && c, "Empty tensor input!");
CheckNTErrors(a->unitNum == c->unitNum, "Unmatched tensors in addition!");
CheckNTErrors(a->dataType == b->dataType && a->dataType == c->dataType,
"Unmatched data types in addition!");
CheckNTErrors(a->order == c->order, "The input tensors do not have the same order in addition!");
CheckNTErrors(!a->isSparse && !b->isSparse && !c->isSparse, "Dense tensors are required!");
CheckNTErrors(a->dimSize[n] == b->unitNum, "Wrong tensor size!");
int stride = 1;
int blockSize = a->dimSize[n];
int blockNum = 1;
for(int i = a->order - 1; i >= 0; i--){
if(i > n)
stride *= a->dimSize[i];
else if(i < n)
blockNum *= a->dimSize[i];
}
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup = 0;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE){
if(stride > 1){
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
KernelAddWithCol<DTYPE, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
else
KernelAddWithCol<DTYPE, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
}
else if(stride == 1){
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
KernelAddWithRow<DTYPE, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
else
KernelAddWithRow<DTYPE, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
}
else{
ShowNTErrors("Something is wrong!");
}
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor)
|
2263f3fdc2e8487c8b7a2e75fcbfde1f6830a0f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// modified from
// https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This file is modified from
// https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu
// Cheng-Yang Fu
// [email protected]
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cfloat>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename scalar_t>
__global__ void SigmoidFocalLossForward(const int nthreads,
const scalar_t *logits,
const long *targets,
const int num_classes,
const float gamma, const float alpha,
const int num, scalar_t *losses) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int n = i / num_classes;
int d = i % num_classes; // current class[0~79];
int t = targets[n]; // target class [1~80];
// Decide whether this is a positive or negative case.
scalar_t c1 = (t == (d + 1));
scalar_t c2 = (t >= 0 & t != (d + 1));
scalar_t zn = (1.0 - alpha);
scalar_t zp = (alpha);
// p = 1. / (1. + expf(-x)); p = sigmoid(x)
scalar_t p = 1. / (1. + expf(-logits[i]));
// (1-p)**gamma * log(p)
scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN));
// p**gamma * log(1-p)
scalar_t term2 =
powf(p, gamma) *
(-1. * logits[i] * (logits[i] >= 0) -
logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0))));
losses[i] = 0.0;
losses[i] += -c1 * term1 * zp;
losses[i] += -c2 * term2 * zn;
} // CUDA_1D_KERNEL_LOOP
} // SigmoidFocalLossForward
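// For reference, the kernel above evaluates the sigmoid focal loss
// FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t) elementwise over the
// N x num_classes logits: term1 handles the positive case (t == d + 1, where
// p_t = p and alpha_t = alpha) and term2 handles the negative case
// (t >= 0 and t != d + 1, where p_t = 1 - p and alpha_t = 1 - alpha), with
// term2 written in a numerically stable log(1 - sigmoid(x)) form.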
template <typename scalar_t>
__global__ void SigmoidFocalLossBackward(
const int nthreads, const scalar_t *logits, const long *targets,
const scalar_t *d_losses, const int num_classes, const float gamma,
const float alpha, const int num, scalar_t *d_logits) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int n = i / num_classes;
int d = i % num_classes; // current class[0~79];
int t = targets[n]; // target class [1~80], 0 is background;
// Decide whether this is a positive or negative case.
scalar_t c1 = (t == (d + 1));
scalar_t c2 = (t >= 0 & t != (d + 1));
scalar_t zn = (1.0 - alpha);
scalar_t zp = (alpha);
// p = 1. / (1. + expf(-x)); p = sigmoid(x)
scalar_t p = 1. / (1. + expf(-logits[i]));
// (1-p)**g * (1 - p - g*p*log(p))
scalar_t term1 =
powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN))));
// (p**g) * (g*(1-p)*log(1-p) - p)
scalar_t term2 =
powf(p, gamma) *
((-1. * logits[i] * (logits[i] >= 0) -
logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) *
(1. - p) * gamma -
p);
d_logits[i] = 0.0;
d_logits[i] += -c1 * term1 * zp;
d_logits[i] += -c2 * term2 * zn;
d_logits[i] = d_logits[i] * d_losses[i];
} // CUDA_1D_KERNEL_LOOP
} // SigmoidFocalLossBackward
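// Likewise, the backward kernel differentiates the loss analytically with
// respect to each logit x (using dp/dx = p * (1 - p)): the positive case
// contributes -alpha * term1 and the negative case -(1 - alpha) * term2 to
// d_logits[i], which is then chained with the incoming gradient d_losses[i].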
at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
const at::Tensor &targets,
const int num_classes,
const float gamma, const float alpha) {
AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor");
AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor");
AT_ASSERTM(logits.dim() == 2, "logits should be NxClass");
const int num_samples = logits.size(0);
auto losses = at::empty({num_samples, logits.size(1)}, logits.options());
auto losses_size = num_samples * logits.size(1);
dim3 grid(::min(THCCeilDiv((long)losses_size, 512L), 4096L));
dim3 block(512);
if (losses.numel() == 0) {
THCudaCheck(hipGetLastError());
return losses;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
logits.scalar_type(), "SigmoidFocalLoss_forward", [&] {
hipLaunchKernelGGL(( SigmoidFocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, 0,
losses_size, logits.contiguous().data<scalar_t>(),
targets.contiguous().data<long>(), num_classes, gamma, alpha,
num_samples, losses.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return losses;
}
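// A minimal host-side usage sketch (assumes this file is built into a torch
// extension exposing SigmoidFocalLoss_forward_cuda; shapes and hyperparameters
// are illustrative):
//
//   at::Tensor logits = at::randn({8, 80}, at::device(at::kCUDA).dtype(at::kFloat));
//   at::Tensor targets = at::randint(0, 81, {8}, at::device(at::kCUDA).dtype(at::kLong));
//   at::Tensor losses = SigmoidFocalLoss_forward_cuda(
//       logits, targets, /*num_classes=*/80, /*gamma=*/2.0f, /*alpha=*/0.25f);
//   // losses has shape {8, 80}; reduce it (e.g. losses.sum()) to get a scalar.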
at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
const at::Tensor &targets,
const at::Tensor &d_losses,
const int num_classes,
const float gamma,
const float alpha) {
AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor");
AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor");
AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor");
AT_ASSERTM(logits.dim() == 2, "logits should be NxClass");
const int num_samples = logits.size(0);
AT_ASSERTM(logits.size(1) == num_classes,
"logits.size(1) should be num_classes");
auto d_logits = at::zeros({num_samples, num_classes}, logits.options());
auto d_logits_size = num_samples * logits.size(1);
dim3 grid(::min(THCCeilDiv((long)d_logits_size, 512L), 4096L));
dim3 block(512);
if (d_logits.numel() == 0) {
THCudaCheck(hipGetLastError());
return d_logits;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
logits.scalar_type(), "SigmoidFocalLoss_backward", [&] {
hipLaunchKernelGGL(( SigmoidFocalLossBackward<scalar_t>), dim3(grid), dim3(block), 0, 0,
d_logits_size, logits.contiguous().data<scalar_t>(),
targets.contiguous().data<long>(),
d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha,
num_samples, d_logits.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return d_logits;
}
|
2263f3fdc2e8487c8b7a2e75fcbfde1f6830a0f5.cu
|
// modified from
// https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This file is modified from
// https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu
// Cheng-Yang Fu
// [email protected]
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cfloat>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename scalar_t>
__global__ void SigmoidFocalLossForward(const int nthreads,
const scalar_t *logits,
const long *targets,
const int num_classes,
const float gamma, const float alpha,
const int num, scalar_t *losses) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int n = i / num_classes;
int d = i % num_classes; // current class[0~79];
int t = targets[n]; // target class [1~80];
// Decide whether this is a positive or negative case.
scalar_t c1 = (t == (d + 1));
scalar_t c2 = (t >= 0 & t != (d + 1));
scalar_t zn = (1.0 - alpha);
scalar_t zp = (alpha);
// p = 1. / (1. + expf(-x)); p = sigmoid(x)
scalar_t p = 1. / (1. + expf(-logits[i]));
// (1-p)**gamma * log(p)
scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN));
// p**gamma * log(1-p)
scalar_t term2 =
powf(p, gamma) *
(-1. * logits[i] * (logits[i] >= 0) -
logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0))));
losses[i] = 0.0;
losses[i] += -c1 * term1 * zp;
losses[i] += -c2 * term2 * zn;
} // CUDA_1D_KERNEL_LOOP
} // SigmoidFocalLossForward
template <typename scalar_t>
__global__ void SigmoidFocalLossBackward(
const int nthreads, const scalar_t *logits, const long *targets,
const scalar_t *d_losses, const int num_classes, const float gamma,
const float alpha, const int num, scalar_t *d_logits) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int n = i / num_classes;
int d = i % num_classes; // current class[0~79];
int t = targets[n]; // target class [1~80], 0 is background;
// Decide whether this is a positive or negative case.
scalar_t c1 = (t == (d + 1));
scalar_t c2 = (t >= 0 & t != (d + 1));
scalar_t zn = (1.0 - alpha);
scalar_t zp = (alpha);
// p = 1. / (1. + expf(-x)); p = sigmoid(x)
scalar_t p = 1. / (1. + expf(-logits[i]));
// (1-p)**g * (1 - p - g*p*log(p))
scalar_t term1 =
powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN))));
// (p**g) * (g*(1-p)*log(1-p) - p)
scalar_t term2 =
powf(p, gamma) *
((-1. * logits[i] * (logits[i] >= 0) -
logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) *
(1. - p) * gamma -
p);
d_logits[i] = 0.0;
d_logits[i] += -c1 * term1 * zp;
d_logits[i] += -c2 * term2 * zn;
d_logits[i] = d_logits[i] * d_losses[i];
} // CUDA_1D_KERNEL_LOOP
} // SigmoidFocalLossBackward
at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
const at::Tensor &targets,
const int num_classes,
const float gamma, const float alpha) {
AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor");
AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor");
AT_ASSERTM(logits.dim() == 2, "logits should be NxClass");
const int num_samples = logits.size(0);
auto losses = at::empty({num_samples, logits.size(1)}, logits.options());
auto losses_size = num_samples * logits.size(1);
dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L));
dim3 block(512);
if (losses.numel() == 0) {
THCudaCheck(cudaGetLastError());
return losses;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
logits.scalar_type(), "SigmoidFocalLoss_forward", [&] {
SigmoidFocalLossForward<scalar_t><<<grid, block>>>(
losses_size, logits.contiguous().data<scalar_t>(),
targets.contiguous().data<long>(), num_classes, gamma, alpha,
num_samples, losses.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return losses;
}
at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
const at::Tensor &targets,
const at::Tensor &d_losses,
const int num_classes,
const float gamma,
const float alpha) {
AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor");
AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor");
AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor");
AT_ASSERTM(logits.dim() == 2, "logits should be NxClass");
const int num_samples = logits.size(0);
AT_ASSERTM(logits.size(1) == num_classes,
"logits.size(1) should be num_classes");
auto d_logits = at::zeros({num_samples, num_classes}, logits.options());
auto d_logits_size = num_samples * logits.size(1);
dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L));
dim3 block(512);
if (d_logits.numel() == 0) {
THCudaCheck(cudaGetLastError());
return d_logits;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
logits.scalar_type(), "SigmoidFocalLoss_backward", [&] {
SigmoidFocalLossBackward<scalar_t><<<grid, block>>>(
d_logits_size, logits.contiguous().data<scalar_t>(),
targets.contiguous().data<long>(),
d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha,
num_samples, d_logits.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return d_logits;
}
|
9409be9c8899fc315028421d4767a94ed7c95753.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_a;
int xdim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_a;
int ydim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_a;
int xdim1_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_a;
int ydim1_update_halo_kernel4_plus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_a*(y)+xdim0_update_halo_kernel4_plus_2_a*ydim0_update_halo_kernel4_plus_2_a*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_a*(y)+xdim1_update_halo_kernel4_plus_2_a*ydim1_update_halo_kernel4_plus_2_a*(z))
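// Illustration only: OPS_ACC0/OPS_ACC1 linearize an (x, y, z) offset into the
// flattened data array in x-fastest order; e.g. with xdim0 = 10 and ydim0 = 8,
// OPS_ACC0(2, 3, 1) = 2 + 10 * 3 + 10 * 8 * 1 = 112.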
//user function
__device__
inline void update_halo_kernel4_plus_2_a_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(2,0,0)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(2,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_a + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_a * ydim0_update_halo_kernel4_plus_2_a;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_a + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_a * ydim1_update_halo_kernel4_plus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_2_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,76)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(76,"update_halo_kernel4_plus_2_a");
OPS_kernels[76].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_a_h || ydim0 != ydim0_update_halo_kernel4_plus_2_a_h || xdim1 != xdim1_update_halo_kernel4_plus_2_a_h || ydim1 != ydim1_update_halo_kernel4_plus_2_a_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_a, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_2_a_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_a, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_2_a_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_a, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_2_a_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_a, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[76].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[76].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[76].mpi_time += t2-t1;
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 76;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 76;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_2_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(76,"update_halo_kernel4_plus_2_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
9409be9c8899fc315028421d4767a94ed7c95753.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_a;
int xdim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_a;
int ydim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_a;
int xdim1_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_a;
int ydim1_update_halo_kernel4_plus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_a*(y)+xdim0_update_halo_kernel4_plus_2_a*ydim0_update_halo_kernel4_plus_2_a*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_a*(y)+xdim1_update_halo_kernel4_plus_2_a*ydim1_update_halo_kernel4_plus_2_a*(z))
//user function
__device__
inline void update_halo_kernel4_plus_2_a_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(2,0,0)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(2,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_a + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_a * ydim0_update_halo_kernel4_plus_2_a;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_a + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_a * ydim1_update_halo_kernel4_plus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_2_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,76)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(76,"update_halo_kernel4_plus_2_a");
OPS_kernels[76].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_a_h || ydim0 != ydim0_update_halo_kernel4_plus_2_a_h || xdim1 != xdim1_update_halo_kernel4_plus_2_a_h || ydim1 != ydim1_update_halo_kernel4_plus_2_a_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_a, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_2_a_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_a, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_2_a_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_a, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_2_a_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_a, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[76].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel4_plus_2_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[76].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[76].mpi_time += t2-t1;
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 76;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 76;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_2_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(76,"update_halo_kernel4_plus_2_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
bd993a33f47187d99fa21887ebe4aab7f29a1203.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
bd993a33f47187d99fa21887ebe4aab7f29a1203.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
ca8c53b382ac9781f273e00051ed1c90cc6956f9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <shrQATest.h>
//======================== THRUST Libs
// http://code.google.com/p/thrust/
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cmath>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
// includes, kernels
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int runMultiGPUExampleForSVM( ) {
// Query device properties
hipDeviceProp_t prop[64];
int gpuid_tesla[64]; // we want to find the first two GPUs that can support P2P
int gpu_count = 0; // GPUs that meet the criteria
int gpu_n;
cutilSafeCall(hipGetDeviceCount(&gpu_n));
printf("CUDA-capable device count: %i\n", gpu_n);
for (int i = 0; i < gpu_n; i++) {
cutilSafeCall(hipGetDeviceProperties(&prop[i], i));
// Only Tesla boards based on Fermi can support P2P
if ((!STRNCASECMP(prop[i].name, "Tesla", 5)) && (prop[i].major >= 2)
#ifdef _WIN32
// on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled
&& prop[i].tccDriver
#endif
)
{
// This is an array of P2P capable GPUs
gpuid_tesla[gpu_count++] = i;
}
printf("> GPU%d = \"%15s\" capable of Peer-to-Peer (P2P)\n", i,
prop[i].name);
}
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid_tesla[0],
gpuid_tesla[1]);
cutilSafeCall(hipSetDevice(gpuid_tesla[0]));
cutilSafeCall(hipDeviceEnablePeerAccess(gpuid_tesla[1], 0)); // second argument is a reserved flags field and must be 0
cutilSafeCall(hipSetDevice(gpuid_tesla[1]));
cutilSafeCall(hipDeviceEnablePeerAccess(gpuid_tesla[0], 0));
// Check that we got UVA on both devices
printf("Checking GPU%d and GPU%d for UVA capabilities...\n",
gpuid_tesla[0], gpuid_tesla[1]);
const bool has_uva = (prop[gpuid_tesla[0]].unifiedAddressing
&& prop[gpuid_tesla[1]].unifiedAddressing);
printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid_tesla[0]].name,
gpuid_tesla[0], (prop[gpuid_tesla[0]].unifiedAddressing ? "Yes"
: "No"));
printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid_tesla[1]].name,
gpuid_tesla[1], (prop[gpuid_tesla[1]].unifiedAddressing ? "Yes"
: "No"));
if (has_uva) {
printf("Both GPUs can support UVA, enabling...\n");
} else {
printf(
"At least one of the two GPUs does NOT support UVA, waiving test.\n");
printf("PASSED\n");
exit(EXIT_SUCCESS);
}
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 512 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid_tesla[0], gpuid_tesla[1]);
cutilSafeCall(hipSetDevice(gpuid_tesla[0]));
printf("choosing device 0\n");
// manipulate memory
// deallocate with device_free
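// note: thrust::device_malloc allocates on whichever device was selected by the preceding hipSetDevice call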
thrust::device_ptr<float> d_dev0 = thrust::device_malloc<float>(1024 * 1024 * 512);
printf("choosing device 1\n");
cutilSafeCall(hipSetDevice(gpuid_tesla[1]));
thrust::device_ptr<float> d_dev1 = thrust::device_malloc<float>(1024 * 1024 * 512);
printf("done\n");
// thrust::host_vector<float> h_res0 = d_dev0;
// thrust::host_vector<float> h_res1 = d_dev1;
//
// printf("suma: %f\n",h_res0[0]+h_res1[0]);
// cutilSafeCall(hipSetDevice(gpuid_tesla[1]));
// float* g1;
// cutilSafeCall(hipMalloc(&g1, buf_size));
// float* h0;
// cutilSafeCall(hipHostMalloc(&h0, buf_size)); // Automatically portable with UVA
cutilSafeCall(hipSetDevice(gpuid_tesla[0]));
thrust::device_free(d_dev0 );
cutilSafeCall(hipSetDevice(gpuid_tesla[1]));
thrust::device_free(d_dev1 );
return 1;
}
|
ca8c53b382ac9781f273e00051ed1c90cc6956f9.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <shrQATest.h>
//======================== THRUST Libs
// http://code.google.com/p/thrust/
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cmath>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
// includes, kernels
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int runMultiGPUExampleForSVM( ) {
// Query device properties
cudaDeviceProp prop[64];
int gpuid_tesla[64]; // we want to find the first two GPUs that can support P2P
int gpu_count = 0; // GPUs that meet the criteria
int gpu_n;
cutilSafeCall(cudaGetDeviceCount(&gpu_n));
printf("CUDA-capable device count: %i\n", gpu_n);
for (int i = 0; i < gpu_n; i++) {
cutilSafeCall(cudaGetDeviceProperties(&prop[i], i));
// Only Tesla boards based on Fermi can support P2P
if ((!STRNCASECMP(prop[i].name, "Tesla", 5)) && (prop[i].major >= 2)
#ifdef _WIN32
// on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled
&& prop[i].tccDriver
#endif
)
{
// This is an array of P2P capable GPUs
gpuid_tesla[gpu_count++] = i;
}
printf("> GPU%d = \"%15s\" capable of Peer-to-Peer (P2P)\n", i,
prop[i].name);
}
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid_tesla[0],
gpuid_tesla[1]);
cutilSafeCall(cudaSetDevice(gpuid_tesla[0]));
cutilSafeCall(cudaDeviceEnablePeerAccess(gpuid_tesla[1], 0)); // second argument is a reserved flags field and must be 0
cutilSafeCall(cudaSetDevice(gpuid_tesla[1]));
cutilSafeCall(cudaDeviceEnablePeerAccess(gpuid_tesla[0], 0));
// Check that we got UVA on both devices
printf("Checking GPU%d and GPU%d for UVA capabilities...\n",
gpuid_tesla[0], gpuid_tesla[1]);
const bool has_uva = (prop[gpuid_tesla[0]].unifiedAddressing
&& prop[gpuid_tesla[1]].unifiedAddressing);
printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid_tesla[0]].name,
gpuid_tesla[0], (prop[gpuid_tesla[0]].unifiedAddressing ? "Yes"
: "No"));
printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid_tesla[1]].name,
gpuid_tesla[1], (prop[gpuid_tesla[1]].unifiedAddressing ? "Yes"
: "No"));
if (has_uva) {
printf("Both GPUs can support UVA, enabling...\n");
} else {
printf(
"At least one of the two GPUs does NOT support UVA, waiving test.\n");
printf("PASSED\n");
exit(EXIT_SUCCESS);
}
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 512 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid_tesla[0], gpuid_tesla[1]);
cutilSafeCall(cudaSetDevice(gpuid_tesla[0]));
printf("choosing device 0\n");
// manipulate memory
// deallocate with device_free
thrust::device_ptr<float> d_dev0 = thrust::device_malloc<float>(1024 * 1024 * 512);
printf("choosing device 1\n");
cutilSafeCall(cudaSetDevice(gpuid_tesla[1]));
thrust::device_ptr<float> d_dev1 = thrust::device_malloc<float>(1024 * 1024 * 512);
printf("done\n");
// thrust::host_vector<float> h_res0 = d_dev0;
// thrust::host_vector<float> h_res1 = d_dev1;
//
// printf("suma: %f\n",h_res0[0]+h_res1[0]);
// cutilSafeCall(cudaSetDevice(gpuid_tesla[1]));
// float* g1;
// cutilSafeCall(cudaMalloc(&g1, buf_size));
// float* h0;
// cutilSafeCall(cudaMallocHost(&h0, buf_size)); // Automatically portable with UVA
cutilSafeCall(cudaSetDevice(gpuid_tesla[0]));
thrust::device_free(d_dev0 );
cutilSafeCall(cudaSetDevice(gpuid_tesla[1]));
thrust::device_free(d_dev1 );
return 1;
}
|
fb6acbaa36af4905dfbda34d9183d57e35978e62.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zpipelinedgmres.cu normal z -> d, Fri Jan 30 19:00:29 2015
@author Hartwig Anzt
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
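// Shared-memory tree reduction: for successively halved strides, thread i folds x[i+stride] into x[i], leaving the block-wide sum in x[0].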
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; }
__syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; }
__syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; }
__syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; }
__syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; }
__syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; }
__syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; }
__syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; }
__syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; }
__syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; }
__syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; }
__syncthreads(); }
}
__global__ void
magma_dpipelined_correction(
int n,
int k,
double * skp,
double * r,
double * v )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double zz= 0.0, tmp= 0.0;
extern __shared__ double temp[];
temp[ i ] = ( i < k ) ? skp[ i ] * skp[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
__syncthreads();
if (i < 64) { temp[ i ] += temp[ i + 64 ]; } __syncthreads();
if( i < 32 ){
temp[ i ] += temp[ i + 32 ];__syncthreads();
temp[ i ] += temp[ i + 16 ];__syncthreads();
temp[ i ] += temp[ i + 8 ];__syncthreads();
temp[ i ] += temp[ i + 4 ];__syncthreads();
temp[ i ] += temp[ i + 2 ];__syncthreads();
temp[ i ] += temp[ i + 1 ];__syncthreads();
}
if( i == 0 ){
tmp = MAGMA_D_REAL( temp[ i ] );
zz = MAGMA_D_REAL( skp[(k)] );
skp[k] = MAGMA_D_MAKE( sqrt(zz-tmp),0.0 );
}
}
__global__ void
magma_dpipelined_copyscale(
int n,
int k,
double * skp,
double * r,
double * v )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double rr=skp[k];
if( i<n ){
v[i] = r[i] * 1.0 / rr;
}
}
//----------------------------------------------------------------------------//
__global__ void
magma_dpipelineddnrm2_kernel(
int m,
double * da,
int ldda,
double * dxnorm )
{
const int i = threadIdx.x;
magmaDouble_ptr dx = da + blockIdx.x * ldda;
__shared__ double sum[ 512 ];
double re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += 512 ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< 512 >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = MAGMA_D_MAKE( sqrt(sum[0]), 0.0 );
}
//----------------------------------------------------------------------------//
__global__ void
magma_dpipelinedscale(
int n,
double * r,
double * drnorm )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i<n ){
r[i] = r[i] * 1.0 / drnorm[0];
}
}
/**
Purpose
-------
Computes the correction term of the pipelined GMRES according to P. Ghysels
and scales and copies the new search direction
Returns the vector v = r / sqrt( skp[k] - sum_i=1^k skp[i]^2 ) .
Arguments
---------
@param[in]
n int
length of v_i
@param[in]
k int
# skp entries v_i^T * r ( without r )
@param[in]
r magmaDouble_ptr
vector of length n
@param[in]
v magmaDouble_ptr
vector of length n
@param[in]
skp magmaDouble_ptr
array of parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dcopyscale(
int n,
int k,
magmaDouble_ptr r,
magmaDouble_ptr v,
magmaDouble_ptr skp,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( (k+BLOCK_SIZE-1)/BLOCK_SIZE );
unsigned int Ms = Bs.x * sizeof( double );
dim3 Gs2( (n+BLOCK_SIZE-1)/BLOCK_SIZE );
hipLaunchKernelGGL(( magma_dpipelined_correction), dim3(Gs), dim3(Bs), Ms, queue ,
n, k, skp, r, v );
hipLaunchKernelGGL(( magma_dpipelined_copyscale), dim3(Gs2), dim3(Bs), 0, queue ,
n, k, skp, r, v );
return MAGMA_SUCCESS;
}
extern "C" magma_int_t
magma_dnrm2scale(
int m,
magmaDouble_ptr r,
int lddr,
magmaDouble_ptr drnorm,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( 512 );
hipLaunchKernelGGL(( magma_dpipelineddnrm2_kernel), dim3(blocks), dim3(threads), 0, queue ,
m, r, lddr, drnorm );
dim3 Bs( BLOCK_SIZE );
dim3 Gs2( (m+BLOCK_SIZE-1)/BLOCK_SIZE );
hipLaunchKernelGGL(( magma_dpipelinedscale), dim3(Gs2), dim3(Bs), 0, queue , m, r, drnorm );
return MAGMA_SUCCESS;
}
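/*
   Usage sketch (illustrative only, not taken from the original source):
   r, v, skp and drnorm are device arrays; n is the vector length, k the
   number of accumulated projections.

       magma_dnrm2scale( n, r, n, drnorm, queue );  // drnorm[0] <- ||r||_2, r <- r / ||r||_2
       magma_dcopyscale( n, k, r, v, skp, queue );  // skp[k] <- sqrt(skp[k] - sum_i skp[i]^2), v <- r / skp[k]
*/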
|
fb6acbaa36af4905dfbda34d9183d57e35978e62.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zpipelinedgmres.cu normal z -> d, Fri Jan 30 19:00:29 2015
@author Hartwig Anzt
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; }
__syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; }
__syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; }
__syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; }
__syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; }
__syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; }
__syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; }
__syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; }
__syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; }
__syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; }
__syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; }
__syncthreads(); }
}
__global__ void
magma_dpipelined_correction(
int n,
int k,
double * skp,
double * r,
double * v )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double zz= 0.0, tmp= 0.0;
extern __shared__ double temp[];
temp[ i ] = ( i < k ) ? skp[ i ] * skp[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
__syncthreads();
if (i < 64) { temp[ i ] += temp[ i + 64 ]; } __syncthreads();
if( i < 32 ){
temp[ i ] += temp[ i + 32 ];__syncthreads();
temp[ i ] += temp[ i + 16 ];__syncthreads();
temp[ i ] += temp[ i + 8 ];__syncthreads();
temp[ i ] += temp[ i + 4 ];__syncthreads();
temp[ i ] += temp[ i + 2 ];__syncthreads();
temp[ i ] += temp[ i + 1 ];__syncthreads();
}
if( i == 0 ){
tmp = MAGMA_D_REAL( temp[ i ] );
zz = MAGMA_D_REAL( skp[(k)] );
skp[k] = MAGMA_D_MAKE( sqrt(zz-tmp),0.0 );
}
}
__global__ void
magma_dpipelined_copyscale(
int n,
int k,
double * skp,
double * r,
double * v )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double rr=skp[k];
if( i<n ){
v[i] = r[i] * 1.0 / rr;
}
}
//----------------------------------------------------------------------------//
__global__ void
magma_dpipelineddnrm2_kernel(
int m,
double * da,
int ldda,
double * dxnorm )
{
const int i = threadIdx.x;
magmaDouble_ptr dx = da + blockIdx.x * ldda;
__shared__ double sum[ 512 ];
double re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += 512 ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< 512 >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = MAGMA_D_MAKE( sqrt(sum[0]), 0.0 );
}
//----------------------------------------------------------------------------//
__global__ void
magma_dpipelinedscale(
int n,
double * r,
double * drnorm )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i<n ){
r[i] = r[i] * 1.0 / drnorm[0];
}
}
/**
Purpose
-------
Computes the correction term of the pipelined GMRES according to P. Ghysels
and scales and copies the new search direction
Returns the vector v = r / sqrt( skp[k] - sum_i=1^k skp[i]^2 ) .
Arguments
---------
@param[in]
n int
length of v_i
@param[in]
k int
# skp entries v_i^T * r ( without r )
@param[in]
r magmaDouble_ptr
vector of length n
@param[in]
v magmaDouble_ptr
vector of length n
@param[in]
skp magmaDouble_ptr
array of parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dcopyscale(
int n,
int k,
magmaDouble_ptr r,
magmaDouble_ptr v,
magmaDouble_ptr skp,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( (k+BLOCK_SIZE-1)/BLOCK_SIZE );
unsigned int Ms = Bs.x * sizeof( double );
dim3 Gs2( (n+BLOCK_SIZE-1)/BLOCK_SIZE );
magma_dpipelined_correction<<<Gs, Bs, Ms, queue >>>
( n, k, skp, r, v );
magma_dpipelined_copyscale<<<Gs2, Bs, 0, queue >>>
( n, k, skp, r, v );
return MAGMA_SUCCESS;
}
extern "C" magma_int_t
magma_dnrm2scale(
int m,
magmaDouble_ptr r,
int lddr,
magmaDouble_ptr drnorm,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( 512 );
magma_dpipelineddnrm2_kernel<<< blocks, threads, 0, queue >>>
( m, r, lddr, drnorm );
dim3 Bs( BLOCK_SIZE );
dim3 Gs2( (m+BLOCK_SIZE-1)/BLOCK_SIZE );
magma_dpipelinedscale<<<Gs2, Bs, 0, queue >>>( m, r, drnorm );
return MAGMA_SUCCESS;
}
|
cad7ba6c17f09feeb59d61706cd683d5797658bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Prerequisites.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "Masking.cuh"
namespace gtom
{
void d_LocalStd(tfloat* d_map, int3 dimsmap, tfloat* d_fouriermask, tfloat localradius, tfloat* d_std, tfloat* d_mean, hipfftHandle planforw, hipfftHandle planback)
{
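// Computes a per-voxel local standard deviation (and optionally the local mean) by FFT-convolving the map and its square with a spherical mask (optionally shaped by d_fouriermask) and combining the results as sqrt(E[x^2] - E[x]^2).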
hipfftHandle localplanforw = planforw;
if (planforw == NULL)
localplanforw = d_FFTR2CGetPlan(DimensionCount(dimsmap), dimsmap, 1);
hipfftHandle localplanback = planback;
if (planback == NULL)
localplanback = d_IFFTC2RGetPlan(DimensionCount(dimsmap), dimsmap, 1);
tcomplex* d_maskft = CudaMallocValueFilled(ElementsFFT(dimsmap), make_cuComplex(1, 1));
tfloat masksum = 0;
// Create spherical mask, calculate its sum, and pre-FFT it for convolution
{
tfloat* d_mask = CudaMallocValueFilled(Elements(dimsmap), (tfloat)1);
d_SphereMask(d_mask, d_mask, dimsmap, &localradius, 1, NULL);
d_RemapFull2FullFFT(d_mask, d_mask, dimsmap);
if (d_fouriermask == NULL)
{
tfloat* d_sum = CudaMallocValueFilled(1, (tfloat)0);
d_Sum(d_mask, d_sum, Elements(dimsmap));
hipMemcpy(&masksum, d_sum, sizeof(tfloat), hipMemcpyDeviceToHost);
hipFree(d_sum);
}
d_FFTR2C(d_mask, d_maskft, &localplanforw);
hipFree(d_mask);
if (d_fouriermask != NULL)
{
d_ComplexMultiplyByVector(d_maskft, d_fouriermask, d_maskft, ElementsFFT(dimsmap));
tfloat* d_maskconv;
hipMalloc((void**)&d_maskconv, Elements(dimsmap) * sizeof(tfloat));
d_IFFTC2R(d_maskft, d_maskconv, DimensionCount(dimsmap), dimsmap, 1, true);
tfloat* d_sum = CudaMallocValueFilled(1, (tfloat)0);
d_Sum(d_maskconv, d_sum, Elements(dimsmap));
hipMemcpy(&masksum, d_sum, sizeof(tfloat), hipMemcpyDeviceToHost);
hipFree(d_sum);
hipFree(d_maskconv);
}
}
tcomplex* d_mapft;
hipMalloc((void**)&d_mapft, ElementsFFT(dimsmap) * sizeof(tcomplex));
tcomplex* d_map2ft;
hipMalloc((void**)&d_map2ft, ElementsFFT(dimsmap) * sizeof(tcomplex));
// Create FTs of map and map^2
{
d_FFTR2C(d_map, d_mapft, &localplanforw);
tfloat* d_map2;
hipMalloc((void**)&d_map2, Elements(dimsmap) * sizeof(tfloat));
d_Square(d_map, d_map2, Elements(dimsmap));
d_FFTR2C(d_map2, d_map2ft, &localplanforw);
hipFree(d_map2);
}
tfloat* d_mapconv;
tfloat* d_map2conv;
// Convolve
{
d_ComplexMultiplyByConjVector(d_mapft, d_maskft, d_mapft, ElementsFFT(dimsmap));
d_ComplexMultiplyByConjVector(d_map2ft, d_maskft, d_map2ft, ElementsFFT(dimsmap));
hipFree(d_maskft);
hipMalloc((void**)&d_mapconv, Elements(dimsmap) * sizeof(tfloat));
d_IFFTC2R(d_mapft, d_mapconv, &localplanback, dimsmap);
hipFree(d_mapft);
hipMalloc((void**)&d_map2conv, Elements(dimsmap) * sizeof(tfloat));
d_IFFTC2R(d_map2ft, d_map2conv, &localplanback, dimsmap);
hipFree(d_map2ft);
}
// Optionally, also output local mean
if (d_mean != NULL)
{
d_DivideByScalar(d_mapconv, d_mean, Elements(dimsmap), masksum);
}
// std = sqrt(max(0, masksum * conv2 - conv1^2)) / masksum
{
d_MultiplyByScalar(d_map2conv, d_map2conv, Elements(dimsmap), masksum);
d_Square(d_mapconv, d_mapconv, Elements(dimsmap));
d_SubtractVector(d_map2conv, d_mapconv, d_map2conv, Elements(dimsmap));
d_MaxOp(d_map2conv, (tfloat)0, d_map2conv, Elements(dimsmap));
d_Sqrt(d_map2conv, d_map2conv, Elements(dimsmap));
d_DivideByScalar(d_map2conv, d_std, Elements(dimsmap), masksum);
}
hipFree(d_mapconv);
hipFree(d_map2conv);
if (planforw == NULL)
hipfftDestroy(localplanforw);
if (planback == NULL)
hipfftDestroy(localplanback);
}
}
|
cad7ba6c17f09feeb59d61706cd683d5797658bc.cu
|
#include "Prerequisites.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "Masking.cuh"
namespace gtom
{
void d_LocalStd(tfloat* d_map, int3 dimsmap, tfloat* d_fouriermask, tfloat localradius, tfloat* d_std, tfloat* d_mean, cufftHandle planforw, cufftHandle planback)
{
cufftHandle localplanforw = planforw;
if (planforw == NULL)
localplanforw = d_FFTR2CGetPlan(DimensionCount(dimsmap), dimsmap, 1);
cufftHandle localplanback = planback;
if (planback == NULL)
localplanback = d_IFFTC2RGetPlan(DimensionCount(dimsmap), dimsmap, 1);
tcomplex* d_maskft = CudaMallocValueFilled(ElementsFFT(dimsmap), make_cuComplex(1, 1));
tfloat masksum = 0;
// Create spherical mask, calculate its sum, and pre-FFT it for convolution
{
tfloat* d_mask = CudaMallocValueFilled(Elements(dimsmap), (tfloat)1);
d_SphereMask(d_mask, d_mask, dimsmap, &localradius, 1, NULL);
d_RemapFull2FullFFT(d_mask, d_mask, dimsmap);
if (d_fouriermask == NULL)
{
tfloat* d_sum = CudaMallocValueFilled(1, (tfloat)0);
d_Sum(d_mask, d_sum, Elements(dimsmap));
cudaMemcpy(&masksum, d_sum, sizeof(tfloat), cudaMemcpyDeviceToHost);
cudaFree(d_sum);
}
d_FFTR2C(d_mask, d_maskft, &localplanforw);
cudaFree(d_mask);
if (d_fouriermask != NULL)
{
d_ComplexMultiplyByVector(d_maskft, d_fouriermask, d_maskft, ElementsFFT(dimsmap));
tfloat* d_maskconv;
cudaMalloc((void**)&d_maskconv, Elements(dimsmap) * sizeof(tfloat));
d_IFFTC2R(d_maskft, d_maskconv, DimensionCount(dimsmap), dimsmap, 1, true);
tfloat* d_sum = CudaMallocValueFilled(1, (tfloat)0);
d_Sum(d_maskconv, d_sum, Elements(dimsmap));
cudaMemcpy(&masksum, d_sum, sizeof(tfloat), cudaMemcpyDeviceToHost);
cudaFree(d_sum);
cudaFree(d_maskconv);
}
}
tcomplex* d_mapft;
cudaMalloc((void**)&d_mapft, ElementsFFT(dimsmap) * sizeof(tcomplex));
tcomplex* d_map2ft;
cudaMalloc((void**)&d_map2ft, ElementsFFT(dimsmap) * sizeof(tcomplex));
// Create FTs of map and map^2
{
d_FFTR2C(d_map, d_mapft, &localplanforw);
tfloat* d_map2;
cudaMalloc((void**)&d_map2, Elements(dimsmap) * sizeof(tfloat));
d_Square(d_map, d_map2, Elements(dimsmap));
d_FFTR2C(d_map2, d_map2ft, &localplanforw);
cudaFree(d_map2);
}
tfloat* d_mapconv;
tfloat* d_map2conv;
// Convolve
{
d_ComplexMultiplyByConjVector(d_mapft, d_maskft, d_mapft, ElementsFFT(dimsmap));
d_ComplexMultiplyByConjVector(d_map2ft, d_maskft, d_map2ft, ElementsFFT(dimsmap));
cudaFree(d_maskft);
cudaMalloc((void**)&d_mapconv, Elements(dimsmap) * sizeof(tfloat));
d_IFFTC2R(d_mapft, d_mapconv, &localplanback, dimsmap);
cudaFree(d_mapft);
cudaMalloc((void**)&d_map2conv, Elements(dimsmap) * sizeof(tfloat));
d_IFFTC2R(d_map2ft, d_map2conv, &localplanback, dimsmap);
cudaFree(d_map2ft);
}
// Optionally, also output local mean
if (d_mean != NULL)
{
d_DivideByScalar(d_mapconv, d_mean, Elements(dimsmap), masksum);
}
// std = sqrt(max(0, masksum * conv2 - conv1^2)) / masksum
{
d_MultiplyByScalar(d_map2conv, d_map2conv, Elements(dimsmap), masksum);
d_Square(d_mapconv, d_mapconv, Elements(dimsmap));
d_SubtractVector(d_map2conv, d_mapconv, d_map2conv, Elements(dimsmap));
d_MaxOp(d_map2conv, (tfloat)0, d_map2conv, Elements(dimsmap));
d_Sqrt(d_map2conv, d_map2conv, Elements(dimsmap));
d_DivideByScalar(d_map2conv, d_std, Elements(dimsmap), masksum);
}
cudaFree(d_mapconv);
cudaFree(d_map2conv);
if (planforw == NULL)
cufftDestroy(localplanforw);
if (planback == NULL)
cufftDestroy(localplanback);
}
}
|
4ff98a833022bc45919714009ed52c9bb9441831.hip
|
// !!! This is a file automatically generated by hipify!!!
/*** replaces the random values of a given vector of size N with ordered values from 0 to N ***/
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 2//64
__global__ void foo(int* glob) {
int a;
int* p;
a = 0;
p = &a;
*p = threadIdx.x;
glob[*p] = threadIdx.x;
}
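// Each thread writes its own index into glob, so after the launch glob[i] == i for every i < N.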
int main(){
int* v;
int* dev_v;
/* set the size of v and initialize it with random values */
v = (int*)malloc(N*sizeof(int)); /* accessible only by the CPU: the main function and __host__ functions */
for (int i = 0; i < N; ++i)
v[i] = rand() % 20 + 1;
for (int i = 0; i < N; ++i)
printf(" %d :", v[i]);
hipMalloc((void**)&dev_v, N*sizeof(int)); /* accessible only by the GPU: __global__ functions */
hipMemcpy(dev_v, v, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, dev_v);
//ESBMC_verify_kernel(foo,1,N,dev_v);
hipMemcpy(v, dev_v, N*sizeof(int), hipMemcpyDeviceToHost);
printf("\n\n\n");
for (int i = 0; i < N; ++i){
printf(" %d :", v[i]);
assert(v[i]!=i);
}
free(v);
hipFree(dev_v);
return 0;
}
|
4ff98a833022bc45919714009ed52c9bb9441831.cu
|
/*** replaces the random values of a given vector of size N with ordered values from 0 to N ***/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//64
__global__ void foo(int* glob) {
int a;
int* p;
a = 0;
p = &a;
*p = threadIdx.x;
glob[*p] = threadIdx.x;
}
int main(){
int* v;
int* dev_v;
/* set the size of v and initialize it with random values */
v = (int*)malloc(N*sizeof(int)); /* accessible only by the CPU: the main function and __host__ functions */
for (int i = 0; i < N; ++i)
v[i] = rand() % 20 + 1;
for (int i = 0; i < N; ++i)
printf(" %d :", v[i]);
cudaMalloc((void**)&dev_v, N*sizeof(int)); /* accessible only by the GPU: __global__ functions */
cudaMemcpy(dev_v, v, N*sizeof(int), cudaMemcpyHostToDevice);
foo<<<1, N>>>(dev_v);
//ESBMC_verify_kernel(foo,1,N,dev_v);
cudaMemcpy(v, dev_v, N*sizeof(int), cudaMemcpyDeviceToHost);
printf("\n\n\n");
for (int i = 0; i < N; ++i){
printf(" %d :", v[i]);
assert(v[i]!=i);
}
free(v);
cudaFree(dev_v);
return 0;
}
|
3be489ac47dd7316915633d7cb74b39353fafb89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
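// Launch geometry: blockIdx.x selects the z slice and threadIdx.x the y row; each thread sweeps the full x extent of the voxel grid.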
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
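// A TSDF value of -2 marks voxels unobserved from this view: behind the camera, projecting outside the image, or with a depth reading beyond 8 m.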
if (pt_cam_z <= 0) {
voxel_grid_TSDF[volume_idx] = -2.0f;
continue;
}
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height) {
voxel_grid_TSDF[volume_idx] = -2.0f;
continue;
}
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
// printf("%f\n", voxel_grid_TSDF[volume_idx]);
if (depth_val > 8) {
voxel_grid_TSDF[volume_idx] = -2.0f;
continue;
}
float diff = depth_val - pt_cam_z;
// This is for labeling the -1 space (occluded space)
// sdf_threshold = 0.12
float sdf_threshold = 0.8;
/*
if (diff < -sdf_threshold || depth_val == 0.0) {
voxel_grid_TSDF[volume_idx] = 2.0f;
continue;
}
// This is for labeling the empty space
if (diff > sdf_threshold) {
voxel_grid_TSDF[volume_idx] = -1.0f;
continue;
}
*/
// Integrate
// float dist = fmin(1.0f, diff / trunc_margin);
// float weight_old = voxel_grid_weight[volume_idx];
// float weight_new = weight_old + 1.0f;
// voxel_grid_weight[volume_idx] = weight_new;
// voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
if (abs(diff) < sdf_threshold) {
// voxel_grid_TSDF[volume_idx] = 1.0f;
// ((diff > 0) - (diff <= 0)) is used for giving positive or negative sign
voxel_grid_TSDF[volume_idx] = ((diff > 0) - (diff <= 0)) * (sdf_threshold - abs(diff)) / sdf_threshold;
}
}
}
// Loads a depth image and generates a TSDF voxel volume (by default 80 x 80 x 48 voxels at 6 cm resolution; see the voxel grid parameters below)
// Volume is aligned with respect to the camera coordinates of the first frame (a.k.a. base frame)
int main(int argc, char * argv[]) {
// Location of camera intrinsic file
std::string cam_K_file = "data/camera-intrinsics.txt";
std::string cam_origin_file = "data/origin/00017227_01e40e56e7c4006efc920560ac4d26b9_fl001_rm0004_0000.txt";
std::string base2world_file = "data/camera/00017227_01e40e56e7c4006efc920560ac4d26b9_fl001_rm0004_0000.txt";
std::string depth_im_file = "data/depth_real_png/00017227_01e40e56e7c4006efc920560ac4d26b9_fl001_rm0004_0000.png";
std::string tsdf_bin_file = "tsdf.bin";
// Location of folder containing RGB-D frames and camera pose files
// std::string data_path = "data/rgbd-frames-yida";
float cam_K[3 * 3];
float cam_origin[3 * 1];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = 43.15f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = 50.88f;
float voxel_grid_origin_z = 0.05f;
float voxel_size = 0.06f;
float trunc_margin = 0.72f;//voxel_size * 5;
int voxel_grid_dim_x = 80;
int voxel_grid_dim_y = 80;
int voxel_grid_dim_z = 48;
// Manual parameters
if (argc > 1) {
cam_K_file = argv[1];
cam_origin_file = argv[2];
base2world_file = argv[3];
depth_im_file = argv[4];
tsdf_bin_file = argv[5];
}
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
std::vector<float> cam_origin_vec = LoadMatrixFromFile(cam_origin_file, 3, 1);
std::copy(cam_origin_vec.begin(), cam_origin_vec.end(), cam_origin);
voxel_grid_origin_x = cam_origin[0];
voxel_grid_origin_y = cam_origin[1];
voxel_grid_origin_z = cam_origin[2];
// Read base frame camera pose
std::ostringstream base_frame_prefix;
// base_frame_prefix << std::setw(6) << std::setfill('0') << base_frame_idx;
// std::string base2world_file = data_path + "/frame-" + base_frame_prefix.str() + ".pose.txt";
std::vector<float> base2world_vec = LoadMatrixFromFile(base2world_file, 4, 4);
std::copy(base2world_vec.begin(), base2world_vec.end(), base2world);
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 0.0f;
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
hipMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
hipMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
hipMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
hipMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
hipMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
// std::ostringstream curr_frame_prefix;
// curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
// // Read current frame depth
// std::string depth_im_file = data_path + "/frame-" + curr_frame_prefix.str() + ".depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
// Read base frame camera pose
std::string cam2world_file = base2world_file; //data_path + "/frame-" + curr_frame_prefix.str() + ".pose.txt";
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
// yida: here we should use base2world for rotation for alignment of the ground
hipMemcpy(gpu_cam2base, base2world, 4 * 4 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
// std::cout << "Fusing: " << depth_im_file << std::endl;
hipLaunchKernelGGL(( Integrate) , dim3(voxel_grid_dim_z), dim3(voxel_grid_dim_y) , 0, 0, gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF);
// Load TSDF voxel grid from GPU to CPU memory
hipMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
checkCUDA(__LINE__, hipGetLastError());
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
// std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
SaveVoxelGrid2SurfacePointCloud("tsdf.ply", voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
// std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::ofstream outFile(tsdf_bin_file, std::ios::binary | std::ios::out);
/*
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
*/
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i) {
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
}
outFile.close();
return 0;
}
|
3be489ac47dd7316915633d7cb74b39353fafb89.cu
|
// ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
if (pt_cam_z <= 0) {
voxel_grid_TSDF[volume_idx] = -2.0f;
continue;
}
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height) {
voxel_grid_TSDF[volume_idx] = -2.0f;
continue;
}
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
// printf("%f\n", voxel_grid_TSDF[volume_idx]);
if (depth_val > 8) {
voxel_grid_TSDF[volume_idx] = -2.0f;
continue;
}
float diff = depth_val - pt_cam_z;
// This is for labeling the -1 space (occluded space)
// sdf_threshold = 0.12
float sdf_threshold = 0.8;
/*
if (diff < -sdf_threshold || depth_val == 0.0) {
voxel_grid_TSDF[volume_idx] = 2.0f;
continue;
}
// This is for labeling the empty space
if (diff > sdf_threshold) {
voxel_grid_TSDF[volume_idx] = -1.0f;
continue;
}
*/
// Integrate
// float dist = fmin(1.0f, diff / trunc_margin);
// float weight_old = voxel_grid_weight[volume_idx];
// float weight_new = weight_old + 1.0f;
// voxel_grid_weight[volume_idx] = weight_new;
// voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
if (abs(diff) < sdf_threshold) {
// voxel_grid_TSDF[volume_idx] = 1.0f;
// ((diff > 0) - (diff <= 0)) is used for giving positive or negative sign
voxel_grid_TSDF[volume_idx] = ((diff > 0) - (diff <= 0)) * (sdf_threshold - abs(diff)) / sdf_threshold;
}
}
}
// Loads a depth image and generates a TSDF voxel volume (by default 80 x 80 x 48 voxels at 6 cm resolution; see the voxel grid parameters below)
// Volume is aligned with respect to the camera coordinates of the first frame (a.k.a. base frame)
int main(int argc, char * argv[]) {
// Location of camera intrinsic file
std::string cam_K_file = "data/camera-intrinsics.txt";
std::string cam_origin_file = "data/origin/00017227_01e40e56e7c4006efc920560ac4d26b9_fl001_rm0004_0000.txt";
std::string base2world_file = "data/camera/00017227_01e40e56e7c4006efc920560ac4d26b9_fl001_rm0004_0000.txt";
std::string depth_im_file = "data/depth_real_png/00017227_01e40e56e7c4006efc920560ac4d26b9_fl001_rm0004_0000.png";
std::string tsdf_bin_file = "tsdf.bin";
// Location of folder containing RGB-D frames and camera pose files
// std::string data_path = "data/rgbd-frames-yida";
float cam_K[3 * 3];
float cam_origin[3 * 1];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = 43.15f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = 50.88f;
float voxel_grid_origin_z = 0.05f;
float voxel_size = 0.06f;
float trunc_margin = 0.72f;//voxel_size * 5;
int voxel_grid_dim_x = 80;
int voxel_grid_dim_y = 80;
int voxel_grid_dim_z = 48;
// Manual parameters
if (argc > 1) {
cam_K_file = argv[1];
cam_origin_file = argv[2];
base2world_file = argv[3];
depth_im_file = argv[4];
tsdf_bin_file = argv[5];
}
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
std::vector<float> cam_origin_vec = LoadMatrixFromFile(cam_origin_file, 3, 1);
std::copy(cam_origin_vec.begin(), cam_origin_vec.end(), cam_origin);
voxel_grid_origin_x = cam_origin[0];
voxel_grid_origin_y = cam_origin[1];
voxel_grid_origin_z = cam_origin[2];
// Read base frame camera pose
std::ostringstream base_frame_prefix;
// base_frame_prefix << std::setw(6) << std::setfill('0') << base_frame_idx;
// std::string base2world_file = data_path + "/frame-" + base_frame_prefix.str() + ".pose.txt";
std::vector<float> base2world_vec = LoadMatrixFromFile(base2world_file, 4, 4);
std::copy(base2world_vec.begin(), base2world_vec.end(), base2world);
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 0.0f;
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
cudaMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
cudaMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
cudaMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
cudaMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
cudaMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
// std::ostringstream curr_frame_prefix;
// curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
// // Read current frame depth
// std::string depth_im_file = data_path + "/frame-" + curr_frame_prefix.str() + ".depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
// Read base frame camera pose
std::string cam2world_file = base2world_file; //data_path + "/frame-" + curr_frame_prefix.str() + ".pose.txt";
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
// yida: here we should use base2world for rotation for alignment of the ground
cudaMemcpy(gpu_cam2base, base2world, 4 * 4 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
// std::cout << "Fusing: " << depth_im_file << std::endl;
Integrate <<< voxel_grid_dim_z, voxel_grid_dim_y >>>(gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF);
// Load TSDF voxel grid from GPU to CPU memory
cudaMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDA(__LINE__, cudaGetLastError());
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
// std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
SaveVoxelGrid2SurfacePointCloud("tsdf.ply", voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
// std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::ofstream outFile(tsdf_bin_file, std::ios::binary | std::ios::out);
/*
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
*/
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i) {
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
}
outFile.close();
return 0;
}
|
4596e0f9f931c9faf490596ad25521bf448048c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <string>
#include <map>
#include <set>
/* every tool needs to include this once */
#include "nvbit_tool.h"
/* nvbit interface file */
#include "nvbit.h"
/* for channel */
#include "utils/channel.hpp"
/* Channel used to communicate from GPU to CPU receiving thread */
#define CHANNEL_SIZE (1l << 20)
static __managed__ ChannelDev channel_dev;
static ChannelHost channel_host;
/* receiving thread and its control variables */
pthread_t recv_thread;
volatile bool recv_thread_started = false;
volatile bool recv_thread_receiving = false;
__managed__ uint64_t counter = 0;
/* skip flag used to avoid re-entry on the nvbit_callback when issuing
* flush_channel kernel call */
bool skip_flag = false;
int cache_line_size = 128;
/* global control variables for this tool */
uint32_t instr_begin_interval = 0;
uint32_t instr_end_interval = UINT32_MAX;
int verbose = 1;
int kernel_id=0;
/* global control variables for this tool */
uint32_t ker_begin_interval = 0;
uint32_t ker_end_interval = UINT32_MAX;
int count_warp_level = 1;
int exclude_pred_off = 0;
/* opcode to id map and reverse map */
std::map<std::string, int> opcode_to_id_map;
std::map<int, std::string> id_to_opcode_map;
/* information collected in the instrumentation function */
typedef struct {
int cta_id_x;
int bb_id;
int warp_id;
} bb_t;
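// One record per instrumented basic-block entry: which CTA (cta_id_x) and global warp (warp_id) executed which basic block (bb_id).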
/* instrumentation function that we want to inject, please note the use of
* 1. "extern "C" __device__ __noinline__" to prevent code elimination by the
* compiler.
* 2. NVBIT_EXPORT_FUNC(count_instrs) to notify nvbit the name of the function
* we want to inject. This name must match exactly the function name */
extern "C" __device__ __noinline__ void count_instrs(int num_instrs,
int count_warp_level,int bb_id) {
/* all the active threads will compute the active mask */
const int active_mask = __ballot(1);
/* each thread will get a lane id (get_laneid() is in utils/utils.h) */
const int laneid = get_laneid();
/* get the id of the first active thread */
const int first_laneid = __ffs(active_mask) - 1;
/* count all the active thread */
const int num_threads = __popc(active_mask);
int warp_id = get_global_warp_id();
/* only the first active thread will perform the atomic */
if (first_laneid == laneid) {
if (count_warp_level) {
atomicAdd((unsigned long long *)&counter, 1 * num_instrs);
bb_t warp_bb;
warp_bb.bb_id=bb_id;
warp_bb.warp_id=warp_id;
warp_bb.cta_id_x=get_ctaid().x;
channel_dev.push(&warp_bb, sizeof(bb_t));
} else {
atomicAdd((unsigned long long *)&counter, num_threads * num_instrs);
}
}
}
NVBIT_EXPORT_FUNC(count_instrs);
extern "C" __device__ __noinline__ void count_pred_off(int predicate,
int count_warp_level) {
const int active_mask = __ballot(1);
const int laneid = get_laneid();
const int first_laneid = __ffs(active_mask) - 1;
const int predicate_mask = __ballot(predicate);
const int mask_off = active_mask ^ predicate_mask;
const int num_threads_off = __popc(mask_off);
if (first_laneid == laneid) {
if (count_warp_level) {
/* if the predicate mask was off we reduce the count of 1 */
if (predicate_mask == 0)
atomicAdd((unsigned long long *)&counter, -1);
} else {
atomicAdd((unsigned long long *)&counter, -num_threads_off);
}
}
}
NVBIT_EXPORT_FUNC(count_pred_off)
/* nvbit_at_init() is executed as soon as the nvbit tool is loaded. We
* typically do initializations in this call. In this case for instance we get
* some environment variables values which we use as input arguments to the tool
*/
void nvbit_at_init() {
/* just make sure all managed variables are allocated on GPU */
setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
/* we get some environment variables that are going to be used to selectively
* instrument (within a interval of kernel indexes and instructions). By
* default we instrument everything. */
GET_VAR_INT(ker_begin_interval, "KERNEL_BEGIN", 0,
"Beginning of the kernel launch interval where to apply "
"instrumentation");
GET_VAR_INT(
ker_end_interval, "KERNEL_END", UINT32_MAX,
"End of the kernel launch interval where to apply instrumentation");
GET_VAR_INT(count_warp_level, "COUNT_WARP_LEVEL", 1,
"Count warp level or thread level instructions");
GET_VAR_INT(exclude_pred_off, "EXCLUDE_PRED_OFF", 0,
"Exclude predicated off instruction from count");
GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool");
std::string pad(100, '-');
printf("%s\n", pad.c_str());
}
/* nvbit_at_function_first_load() is executed every time a function is loaded
* for the first time. Inside this call-back we typically get the vector of SASS
* instructions composing the loaded hipFunction_t. We can iterate on this vector
* and insert call to instrumentation functions before or after each one of
* them. */
void nvbit_at_function_first_load(hipCtx_t ctx, hipFunction_t func) {
/* Get the static control flow graph of instruction */
const CFG_t &cfg = nvbit_get_CFG(ctx, func);
if (cfg.is_degenerate) {
printf(
"Warning: Function %s is degenerated, we can't compute basic "
"blocks statically",
nvbit_get_func_name(ctx, func));
}
if (verbose) {
printf("Function %s\n", nvbit_get_func_name(ctx, func));
/* print */
int cnt = 0;
for (auto &bb : cfg.bbs) {
printf("Basic block id %d - num instructions %ld\n", cnt++,
bb->instrs.size());
for (auto &i : bb->instrs) {
i->print(" ");
}
}
}
if (verbose) {
printf("inspecting %s - number basic blocks %ld\n",
nvbit_get_func_name(ctx, func), cfg.bbs.size());
}
/* Iterate on basic block and inject the first instruction */
int bb_id=0;
for (auto &bb : cfg.bbs) {
Instr *i = bb->instrs[0];
/* inject device function */
nvbit_insert_call(i, "count_instrs", IPOINT_BEFORE);
/* add size of basic block in number of instruction */
nvbit_add_call_arg_const_val32(i, bb->instrs.size());
/* add count warp level option */
nvbit_add_call_arg_const_val32(i, count_warp_level);
nvbit_add_call_arg_const_val32(i,bb_id);
bb_id++;
if (verbose) {
i->print("Inject count_instr before - ");
}
}
if (exclude_pred_off) {
/* iterate on instructions */
for (auto i : nvbit_get_instrs(ctx, func)) {
/* inject only if instruction has predicate */
if (i->hasPred()) {
/* inject function */
nvbit_insert_call(i, "count_pred_off", IPOINT_BEFORE);
/* add predicate as argument */
nvbit_add_call_arg_pred_val(i);
/* add count warp level option */
nvbit_add_call_arg_const_val32(i, count_warp_level);
if (verbose) {
i->print("Inject count_instr before - ");
}
}
}
}
}
__global__ void flush_channel() {
/* push memory access with negative cta id to communicate the kernel is
* completed */
bb_t bb_s;
bb_s.cta_id_x = -1;
channel_dev.push(&bb_s, sizeof(bb_t));
/* flush channel */
channel_dev.flush();
}
void nvbit_at_cuda_event(hipCtx_t ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, hipError_t *pStatus) {
if (skip_flag) return;
if (cbid == API_CUDA_cuLaunchKernel_ptsz ||
cbid == API_CUDA_cuLaunchKernel) {
cuLaunchKernel_params *p = (cuLaunchKernel_params *)params;
if (!is_exit) {
int nregs;
CUDA_SAFECALL(
hipFuncGetAttribute(&nregs, hipFuncAttributeNumRegs, p->f));
int shmem_static_nbytes;
CUDA_SAFECALL(hipFuncGetAttribute(&shmem_static_nbytes,
hipFuncAttributeSharedSizeBytes,
p->f));
printf(
"Kernel %s - grid size %d,%d,%d - block size %d,%d,%d - nregs "
"%d - shmem %d - cuda stream id %ld\n",
nvbit_get_func_name(ctx, p->f), p->gridDimX, p->gridDimY,
p->gridDimZ, p->blockDimX, p->blockDimY, p->blockDimZ, nregs,
shmem_static_nbytes + p->sharedMemBytes, (uint64_t)p->hStream);
recv_thread_receiving = true;
} else {
kernel_id++;
/* make sure current kernel is completed */
hipDeviceSynchronize();
assert(hipGetLastError() == hipSuccess);
/* make sure we prevent re-entry on the nvbit_callback when issuing
* the flush_channel kernel */
skip_flag = true;
/* issue flush of channel so we are sure all the memory accesses
* have been pushed */
hipLaunchKernelGGL(( flush_channel), dim3(1), dim3(1), 0, 0, );
hipDeviceSynchronize();
assert(hipGetLastError() == hipSuccess);
/* unset the skip flag */
skip_flag = false;
            /* wait here until the receiving thread has finished with the
             * current kernel */
while (recv_thread_receiving) {
pthread_yield();
}
}
}
}
void *recv_thread_fun(void *) {
char *recv_buffer = (char *)malloc(CHANNEL_SIZE);
while (recv_thread_started) {
uint32_t num_recv_bytes = 0;
if (recv_thread_receiving &&
(num_recv_bytes = channel_host.recv(recv_buffer, CHANNEL_SIZE)) >
0) {
uint32_t num_processed_bytes = 0;
while (num_processed_bytes < num_recv_bytes) {
bb_t *warp_bb =
(bb_t *)&recv_buffer[num_processed_bytes];
/* when we get this cta_id_x it means the kernel has completed
*/
if (warp_bb->cta_id_x == -1) {
recv_thread_receiving = false;
break;
}
                int warp_id = warp_bb->warp_id;
                int bb_id = warp_bb->bb_id;
                char fn[100];
                snprintf(fn, sizeof(fn), "./bb_trace_%d.txt", kernel_id);
                FILE *f = fopen(fn, "a");
                if (f != NULL) {
                    fprintf(f, "%d,%d\n", warp_id, bb_id);
                    fclose(f);
                }
num_processed_bytes += sizeof(bb_t);
}
}
}
/*
for(std::map<int,std::vector<mem_access_t *>>::iterator it=per_warp_mem_trace.begin(); it!=per_warp_mem_trace.end();++it)
{std::vector<mem_access_t *> trace = it->second;
FILE * f =fopen("./mem_trace.txt","a");
if(f!=NULL)
{
for (int i =0; i< trace.size();i++)
{fprintf(f,"%d, %d,%d,%d,",trace[i]->warp_id,trace[i]->sm_id,trace[i]->offset,trace[i]->RoW);//warp_id, pc, RoW
std::set<uint64_t> coalesced_addr;
for (int j=0; j< 32; j++)
{ uint64_t cache_line_addr = trace[i]->addrs[j]/cache_line_size;
coalesced_addr.insert(cache_line_addr);
}
for (std::set<uint64_t>:: iterator addr=coalesced_addr.begin();addr!=coalesced_addr.end();++addr)
fprintf(f,"%lld,",*addr);
fprintf(f,"\n");
}
}
fclose(f);
}
*/
free(recv_buffer);
return NULL;
}
void nvbit_at_ctx_init(hipCtx_t ctx) {
recv_thread_started = true;
channel_host.init(0, CHANNEL_SIZE, &channel_dev, NULL);
pthread_create(&recv_thread, NULL, recv_thread_fun, NULL);
}
void nvbit_at_ctx_term(hipCtx_t ctx) {
if (recv_thread_started) {
recv_thread_started = false;
pthread_join(recv_thread, NULL);
}
}
|
4596e0f9f931c9faf490596ad25521bf448048c8.cu
|
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <string>
#include <map>
#include <set>
/* every tool needs to include this once */
#include "nvbit_tool.h"
/* nvbit interface file */
#include "nvbit.h"
/* for channel */
#include "utils/channel.hpp"
/* Channel used to communicate from GPU to CPU receiving thread */
#define CHANNEL_SIZE (1l << 20)
static __managed__ ChannelDev channel_dev;
static ChannelHost channel_host;
/* receiving thread and its control variables */
pthread_t recv_thread;
volatile bool recv_thread_started = false;
volatile bool recv_thread_receiving = false;
__managed__ uint64_t counter = 0;
/* skip flag used to avoid re-entry on the nvbit_callback when issuing
* flush_channel kernel call */
bool skip_flag = false;
int cache_line_size = 128;
/* global control variables for this tool */
uint32_t instr_begin_interval = 0;
uint32_t instr_end_interval = UINT32_MAX;
int verbose = 1;
int kernel_id=0;
/* global control variables for this tool */
uint32_t ker_begin_interval = 0;
uint32_t ker_end_interval = UINT32_MAX;
int count_warp_level = 1;
int exclude_pred_off = 0;
/* opcode to id map and reverse map */
std::map<std::string, int> opcode_to_id_map;
std::map<int, std::string> id_to_opcode_map;
/* information collected in the instrumentation function */
typedef struct {
int cta_id_x;
int bb_id;
int warp_id;
} bb_t;
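/* Each bb_t record reports one dynamic visit of a basic block by one warp; the
 * receiving host thread appends these records to bb_trace_<kernel_id>.txt, and
 * a record with cta_id_x == -1 marks the completion of a kernel. */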
/* instrumentation function that we want to inject, please note the use of
* 1. "extern "C" __device__ __noinline__" to prevent code elimination by the
* compiler.
* 2. NVBIT_EXPORT_FUNC(count_instrs) to notify nvbit the name of the function
* we want to inject. This name must match exactly the function name */
extern "C" __device__ __noinline__ void count_instrs(int num_instrs,
int count_warp_level,int bb_id) {
/* all the active threads will compute the active mask */
const int active_mask = __ballot(1);
/* each thread will get a lane id (get_lane_id is in utils/utils.h) */
const int laneid = get_laneid();
/* get the id of the first active thread */
const int first_laneid = __ffs(active_mask) - 1;
    /* count all the active threads */
const int num_threads = __popc(active_mask);
int warp_id = get_global_warp_id();
/* only the first active thread will perform the atomic */
if (first_laneid == laneid) {
if (count_warp_level) {
atomicAdd((unsigned long long *)&counter, 1 * num_instrs);
bb_t warp_bb;
warp_bb.bb_id=bb_id;
warp_bb.warp_id=warp_id;
warp_bb.cta_id_x=get_ctaid().x;
channel_dev.push(&warp_bb, sizeof(bb_t));
} else {
atomicAdd((unsigned long long *)&counter, num_threads * num_instrs);
}
}
}
NVBIT_EXPORT_FUNC(count_instrs);
extern "C" __device__ __noinline__ void count_pred_off(int predicate,
int count_warp_level) {
const int active_mask = __ballot(1);
const int laneid = get_laneid();
const int first_laneid = __ffs(active_mask) - 1;
const int predicate_mask = __ballot(predicate);
const int mask_off = active_mask ^ predicate_mask;
const int num_threads_off = __popc(mask_off);
if (first_laneid == laneid) {
if (count_warp_level) {
            /* if the predicate is off for the whole warp we reduce the count by 1 */
if (predicate_mask == 0)
atomicAdd((unsigned long long *)&counter, -1);
} else {
atomicAdd((unsigned long long *)&counter, -num_threads_off);
}
}
}
NVBIT_EXPORT_FUNC(count_pred_off);
/* nvbit_at_init() is executed as soon as the nvbit tool is loaded. We
* typically do initializations in this call. In this case for instance we get
 * some environment variable values which we use as input arguments to the tool
*/
void nvbit_at_init() {
/* just make sure all managed variables are allocated on GPU */
setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
    /* we get some environment variables that are going to be used to
     * selectively instrument (within an interval of kernel indices and
     * instructions). By default we instrument everything. */
GET_VAR_INT(ker_begin_interval, "KERNEL_BEGIN", 0,
"Beginning of the kernel launch interval where to apply "
"instrumentation");
GET_VAR_INT(
ker_end_interval, "KERNEL_END", UINT32_MAX,
"End of the kernel launch interval where to apply instrumentation");
GET_VAR_INT(count_warp_level, "COUNT_WARP_LEVEL", 1,
"Count warp level or thread level instructions");
    GET_VAR_INT(exclude_pred_off, "EXCLUDE_PRED_OFF", 0,
                "Exclude predicated off instructions from the count");
GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool");
std::string pad(100, '-');
printf("%s\n", pad.c_str());
}
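/* Illustrative invocation sketch (not part of the tool; the shared-library and
 * application names below are assumptions). An nvbit tool is preloaded into the
 * target CUDA application and configured through the environment variables read
 * above, e.g.
 *
 *   KERNEL_BEGIN=0 KERNEL_END=10 TOOL_VERBOSE=1 LD_PRELOAD=./bb_trace_tool.so ./my_app
 */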
/* nvbit_at_function_first_load() is executed every time a function is loaded
* for the first time. Inside this call-back we typically get the vector of SASS
 * instructions composing the loaded CUfunction. We can iterate over this vector
 * and insert calls to instrumentation functions before or after each one of
* them. */
void nvbit_at_function_first_load(CUcontext ctx, CUfunction func) {
    /* Get the static control flow graph of the function's instructions */
const CFG_t &cfg = nvbit_get_CFG(ctx, func);
if (cfg.is_degenerate) {
        printf(
            "Warning: Function %s is degenerate; we can't compute basic "
            "blocks statically\n",
nvbit_get_func_name(ctx, func));
}
if (verbose) {
printf("Function %s\n", nvbit_get_func_name(ctx, func));
/* print */
int cnt = 0;
for (auto &bb : cfg.bbs) {
printf("Basic block id %d - num instructions %ld\n", cnt++,
bb->instrs.size());
for (auto &i : bb->instrs) {
i->print(" ");
}
}
}
if (verbose) {
printf("inspecting %s - number basic blocks %ld\n",
nvbit_get_func_name(ctx, func), cfg.bbs.size());
}
    /* Iterate over the basic blocks and instrument the first instruction of each */
    int bb_id = 0;
for (auto &bb : cfg.bbs) {
Instr *i = bb->instrs[0];
/* inject device function */
nvbit_insert_call(i, "count_instrs", IPOINT_BEFORE);
/* add size of basic block in number of instruction */
nvbit_add_call_arg_const_val32(i, bb->instrs.size());
/* add count warp level option */
nvbit_add_call_arg_const_val32(i, count_warp_level);
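        /* add the basic block id so the receiving thread can attribute the record */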
        nvbit_add_call_arg_const_val32(i, bb_id);
bb_id++;
if (verbose) {
i->print("Inject count_instr before - ");
}
}
if (exclude_pred_off) {
/* iterate on instructions */
for (auto i : nvbit_get_instrs(ctx, func)) {
/* inject only if instruction has predicate */
if (i->hasPred()) {
/* inject function */
nvbit_insert_call(i, "count_pred_off", IPOINT_BEFORE);
/* add predicate as argument */
nvbit_add_call_arg_pred_val(i);
/* add count warp level option */
nvbit_add_call_arg_const_val32(i, count_warp_level);
if (verbose) {
i->print("Inject count_instr before - ");
}
}
}
}
}
__global__ void flush_channel() {
/* push memory access with negative cta id to communicate the kernel is
* completed */
bb_t bb_s;
bb_s.cta_id_x = -1;
channel_dev.push(&bb_s, sizeof(bb_t));
/* flush channel */
channel_dev.flush();
}
void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, CUresult *pStatus) {
if (skip_flag) return;
if (cbid == API_CUDA_cuLaunchKernel_ptsz ||
cbid == API_CUDA_cuLaunchKernel) {
cuLaunchKernel_params *p = (cuLaunchKernel_params *)params;
if (!is_exit) {
int nregs;
CUDA_SAFECALL(
cuFuncGetAttribute(&nregs, CU_FUNC_ATTRIBUTE_NUM_REGS, p->f));
int shmem_static_nbytes;
CUDA_SAFECALL(cuFuncGetAttribute(&shmem_static_nbytes,
CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES,
p->f));
printf(
"Kernel %s - grid size %d,%d,%d - block size %d,%d,%d - nregs "
"%d - shmem %d - cuda stream id %ld\n",
nvbit_get_func_name(ctx, p->f), p->gridDimX, p->gridDimY,
p->gridDimZ, p->blockDimX, p->blockDimY, p->blockDimZ, nregs,
shmem_static_nbytes + p->sharedMemBytes, (uint64_t)p->hStream);
recv_thread_receiving = true;
} else {
kernel_id++;
/* make sure current kernel is completed */
cudaDeviceSynchronize();
assert(cudaGetLastError() == cudaSuccess);
/* make sure we prevent re-entry on the nvbit_callback when issuing
* the flush_channel kernel */
skip_flag = true;
/* issue flush of channel so we are sure all the memory accesses
* have been pushed */
flush_channel<<<1, 1>>>();
cudaDeviceSynchronize();
assert(cudaGetLastError() == cudaSuccess);
/* unset the skip flag */
skip_flag = false;
            /* wait here until the receiving thread has finished with the
             * current kernel */
while (recv_thread_receiving) {
pthread_yield();
}
}
}
}
void *recv_thread_fun(void *) {
char *recv_buffer = (char *)malloc(CHANNEL_SIZE);
while (recv_thread_started) {
uint32_t num_recv_bytes = 0;
if (recv_thread_receiving &&
(num_recv_bytes = channel_host.recv(recv_buffer, CHANNEL_SIZE)) >
0) {
uint32_t num_processed_bytes = 0;
while (num_processed_bytes < num_recv_bytes) {
bb_t *warp_bb =
(bb_t *)&recv_buffer[num_processed_bytes];
/* when we get this cta_id_x it means the kernel has completed
*/
if (warp_bb->cta_id_x == -1) {
recv_thread_receiving = false;
break;
}
                int warp_id = warp_bb->warp_id;
                int bb_id = warp_bb->bb_id;
                char fn[100];
                snprintf(fn, sizeof(fn), "./bb_trace_%d.txt", kernel_id);
                FILE *f = fopen(fn, "a");
                if (f != NULL) {
                    fprintf(f, "%d,%d\n", warp_id, bb_id);
                    fclose(f);
                }
num_processed_bytes += sizeof(bb_t);
}
}
}
/*
for(std::map<int,std::vector<mem_access_t *>>::iterator it=per_warp_mem_trace.begin(); it!=per_warp_mem_trace.end();++it)
{std::vector<mem_access_t *> trace = it->second;
FILE * f =fopen("./mem_trace.txt","a");
if(f!=NULL)
{
for (int i =0; i< trace.size();i++)
{fprintf(f,"%d, %d,%d,%d,",trace[i]->warp_id,trace[i]->sm_id,trace[i]->offset,trace[i]->RoW);//warp_id, pc, RoW
std::set<uint64_t> coalesced_addr;
for (int j=0; j< 32; j++)
{ uint64_t cache_line_addr = trace[i]->addrs[j]/cache_line_size;
coalesced_addr.insert(cache_line_addr);
}
for (std::set<uint64_t>:: iterator addr=coalesced_addr.begin();addr!=coalesced_addr.end();++addr)
fprintf(f,"%lld,",*addr);
fprintf(f,"\n");
}
}
fclose(f);
}
*/
free(recv_buffer);
return NULL;
}
void nvbit_at_ctx_init(CUcontext ctx) {
recv_thread_started = true;
channel_host.init(0, CHANNEL_SIZE, &channel_dev, NULL);
pthread_create(&recv_thread, NULL, recv_thread_fun, NULL);
}
void nvbit_at_ctx_term(CUcontext ctx) {
if (recv_thread_started) {
recv_thread_started = false;
pthread_join(recv_thread, NULL);
}
}
|
4f67ecb192a1a51c80ae4a378265abdad40940a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl_diag.cu, normal z -> d, Tue Aug 30 09:38:32 2016
*/
#include "magma_internal.h"
#define MB 64
#define NB 160
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_diag_lower(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[j + j*ldd] );
}
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_diag_upper(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[ind_x + ind_x*ldd] );
}
}
}
/***************************************************************************//**
Purpose
-------
DLASCL_DIAG scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be upper triangular or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD DOUBLE PRECISION vector, dimension (LDDD,M)
The matrix storing the scaling factor on its diagonal.
@param[in]
lddd INTEGER
The leading dimension of the array D.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_dlascl_diag_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD, magma_int_t lddd,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( lddd < max(1,m) )
*info = -5;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( MB );
dim3 grid( magma_ceildiv( m, MB ), magma_ceildiv( n, NB ) );
if (type == MagmaLower) {
hipLaunchKernelGGL(( dlascl_diag_lower)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dD, lddd, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( dlascl_diag_upper)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dD, lddd, dA, ldda);
}
}
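/*
   Illustrative call sketch (not part of MAGMA; the queue and the device
   pointers dD/dA are assumed to have been created by the caller, e.g. with
   magma_queue_create() and magma_dmalloc()):

       magma_int_t info;
       magmablas_dlascl_diag_q( MagmaLower, m, n, dD, lddd, dA, ldda, queue, &info );

   Each thread block covers an MB x NB tile of A, which is why the grid above
   is magma_ceildiv(m, MB) x magma_ceildiv(n, NB).
*/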
|
4f67ecb192a1a51c80ae4a378265abdad40940a1.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl_diag.cu, normal z -> d, Tue Aug 30 09:38:32 2016
*/
#include "magma_internal.h"
#define MB 64
#define NB 160
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_diag_lower(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[j + j*ldd] );
}
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_diag_upper(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[ind_x + ind_x*ldd] );
}
}
}
/***************************************************************************//**
Purpose
-------
DLASCL_DIAG scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be upper triangular or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD DOUBLE PRECISION vector, dimension (LDDD,M)
The matrix storing the scaling factor on its diagonal.
@param[in]
lddd INTEGER
The leading dimension of the array D.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_dlascl_diag_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD, magma_int_t lddd,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( lddd < max(1,m) )
*info = -5;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( MB );
dim3 grid( magma_ceildiv( m, MB ), magma_ceildiv( n, NB ) );
if (type == MagmaLower) {
dlascl_diag_lower
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, dD, lddd, dA, ldda);
}
else if (type == MagmaUpper) {
dlascl_diag_upper
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, dD, lddd, dA, ldda);
}
}
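/*
   Illustrative call sketch (not part of MAGMA; the queue and the device
   pointers dD/dA are assumed to have been created by the caller, e.g. with
   magma_queue_create() and magma_dmalloc()):

       magma_int_t info;
       magmablas_dlascl_diag_q( MagmaLower, m, n, dD, lddd, dA, ldda, queue, &info );

   Each thread block covers an MB x NB tile of A, which is why the grid above
   is magma_ceildiv(m, MB) x magma_ceildiv(n, NB).
*/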
|
22f7e9fe2b9c0d93ece0e918c0fcef4994f87b58.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_tools_kernels.cu
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
* See the LICENSE.txt file in the nifty_reg root folder
*/
#ifndef _REG_TOOLS_KERNELS_CU
#define _REG_TOOLS_KERNELS_CU
/* *************************************************************** */
__device__ __constant__ int c_NodeNumber;
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int3 c_TargetImageDim;
__device__ __constant__ float3 c_VoxelNodeRatio;
__device__ __constant__ int3 c_ControlPointImageDim;
__device__ __constant__ int3 c_ImageDim;
__device__ __constant__ float c_Weight;
/* *************************************************************** */
texture<float4, 1, hipReadModeElementType> controlPointTexture;
texture<float4, 1, hipReadModeElementType> gradientImageTexture;
texture<float4, 1, hipReadModeElementType> matrixTexture;
texture<float, 1, hipReadModeElementType> convolutionKernelTexture;
/* *************************************************************** */
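/* The __constant__ symbols and textures above are expected to be filled in by
   host-side helpers (e.g. hipMemcpyToSymbol and texture binding) before any of
   the kernels below is launched; those helpers live outside this file. */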
__global__ void reg_voxelCentric2NodeCentric_kernel(float4 *nodeNMIGradientArray_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_NodeNumber){
const int3 gridSize = c_ControlPointImageDim;
int tempIndex=tid;
const short z =(int)(tempIndex/(gridSize.x*gridSize.y));
tempIndex -= z*(gridSize.x)*(gridSize.y);
const short y =(int)(tempIndex/(gridSize.x));
const short x = tempIndex - y*(gridSize.x);
const float3 ratio = c_VoxelNodeRatio;
const short X = round((x-1)*ratio.x);
const short Y = round((y-1)*ratio.y);
const short Z = round((z-1)*ratio.z);
const int3 imageSize = c_TargetImageDim;
if(-1<X && X<imageSize.x && -1<Y && Y<imageSize.y && -1<Z && Z<imageSize.z){
int index = (Z*imageSize.y+Y)*imageSize.x+X;
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
nodeNMIGradientArray_d[tid] = make_float4(c_Weight*gradientValue.x,
c_Weight*gradientValue.y,
c_Weight*gradientValue.z,
0.0f);
}
else nodeNMIGradientArray_d[tid]=make_float4(0, 0.0f, 0.0f, 0.0f);
}
}
/* *************************************************************** */
__global__ void _reg_convertNMIGradientFromVoxelToRealSpace_kernel(float4 *gradient)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_NodeNumber){
float4 voxelGradient = gradient[tid];
float4 realGradient;
float4 matrix = tex1Dfetch(matrixTexture,0);
realGradient.x = matrix.x*voxelGradient.x + matrix.y*voxelGradient.y +
matrix.z*voxelGradient.z;
matrix = tex1Dfetch(matrixTexture,1);
realGradient.y = matrix.x*voxelGradient.x + matrix.y*voxelGradient.y +
matrix.z*voxelGradient.z;
matrix = tex1Dfetch(matrixTexture,2);
realGradient.z = matrix.x*voxelGradient.x + matrix.y*voxelGradient.y +
matrix.z*voxelGradient.z;
gradient[tid]=realGradient;
}
}
__global__ void _reg_ApplyConvolutionWindowAlongX_kernel( float4 *smoothedImage,
int windowSize)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
int3 imageSize = c_ImageDim;
int temp=tid;
const short z=(int)(temp/(imageSize.x*imageSize.y));
temp -= z*imageSize.x*imageSize.y;
const short y =(int)(temp/(imageSize.x));
short x = temp - y*(imageSize.x);
int radius = (windowSize-1)/2;
int index = tid - radius;
x -= radius;
float4 finalValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Kahan summation used here
float3 c=make_float3(0.f,0.f,0.f), Y, t;
float windowValue;
for(int i=0; i<windowSize; i++){
if(-1<x && x<imageSize.x){
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
windowValue = tex1Dfetch(convolutionKernelTexture,i);
Y.x = gradientValue.x * windowValue - c.x;
Y.y = gradientValue.y * windowValue - c.y;
Y.z = gradientValue.z * windowValue - c.z;
t.x = finalValue.x + Y.x;
t.y = finalValue.y + Y.y;
t.z = finalValue.z + Y.z;
c.x = (t.x - finalValue.x) - Y.x;
c.y = (t.y - finalValue.y) - Y.y;
c.z = (t.z - finalValue.z) - Y.z;
finalValue = make_float4(t.x, t.y, t.z, 0.f);
}
index++;
x++;
}
smoothedImage[tid] = finalValue;
}
return;
}
__global__ void _reg_ApplyConvolutionWindowAlongY_kernel(float4 *smoothedImage,
int windowSize)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
int3 imageSize = c_ImageDim;
const short z=(int)(tid/(imageSize.x*imageSize.y));
int index = tid - z*imageSize.x*imageSize.y;
short y=(int)(index/imageSize.x);
int radius = (windowSize-1)/2;
index = tid - imageSize.x*radius;
y -= radius;
float4 finalValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Kahan summation used here
float3 c=make_float3(0.f,0.f,0.f), Y, t;
float windowValue;
for(int i=0; i<windowSize; i++){
if(-1<y && y<imageSize.y){
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
windowValue = tex1Dfetch(convolutionKernelTexture,i);
Y.x = gradientValue.x * windowValue - c.x;
Y.y = gradientValue.y * windowValue - c.y;
Y.z = gradientValue.z * windowValue - c.z;
t.x = finalValue.x + Y.x;
t.y = finalValue.y + Y.y;
t.z = finalValue.z + Y.z;
c.x = (t.x - finalValue.x) - Y.x;
c.y = (t.y - finalValue.y) - Y.y;
c.z = (t.z - finalValue.z) - Y.z;
finalValue = make_float4(t.x, t.y, t.z, 0.f);
}
index += imageSize.x;
y++;
}
smoothedImage[tid] = finalValue;
}
return;
}
__global__ void _reg_ApplyConvolutionWindowAlongZ_kernel(float4 *smoothedImage,
int windowSize)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
int3 imageSize = c_ImageDim;
short z=(int)(tid/((imageSize.x)*(imageSize.y)));
int radius = (windowSize-1)/2;
int index = tid - imageSize.x*imageSize.y*radius;
z -= radius;
float4 finalValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Kahan summation used here
float3 c=make_float3(0.f,0.f,0.f), Y, t;
float windowValue;
for(int i=0; i<windowSize; i++){
if(-1<z && z<imageSize.z){
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
windowValue = tex1Dfetch(convolutionKernelTexture,i);
Y.x = gradientValue.x * windowValue - c.x;
Y.y = gradientValue.y * windowValue - c.y;
Y.z = gradientValue.z * windowValue - c.z;
t.x = finalValue.x + Y.x;
t.y = finalValue.y + Y.y;
t.z = finalValue.z + Y.z;
c.x = (t.x - finalValue.x) - Y.x;
c.y = (t.y - finalValue.y) - Y.y;
c.z = (t.z - finalValue.z) - Y.z;
finalValue = make_float4(t.x, t.y, t.z, 0.f);
}
index += imageSize.x*imageSize.y;
z++;
}
smoothedImage[tid] = finalValue;
}
return;
}
/* *************************************************************** */
__global__ void reg_multiplyValue_kernel_float(float *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array_d[tid] *= c_Weight;
}
}
/* *************************************************************** */
__global__ void reg_multiplyValue_kernel_float4(float4 *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 temp = array_d[tid];
array_d[tid] = make_float4(temp.x*c_Weight,temp.y*c_Weight,temp.z*c_Weight,temp.w*c_Weight);
}
}
/* *************************************************************** */
__global__ void reg_addValue_kernel_float(float *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array_d[tid] += c_Weight;
}
}
/* *************************************************************** */
__global__ void reg_addValue_kernel_float4(float4 *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 temp = array_d[tid];
array_d[tid] = make_float4(temp.x+c_Weight,temp.y+c_Weight,temp.z+c_Weight,temp.w+c_Weight);
}
}
/* *************************************************************** */
__global__ void reg_multiplyArrays_kernel_float(float *array1_d, float *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array1_d[tid] *= array2_d[tid];
}
}
/* *************************************************************** */
__global__ void reg_multiplyArrays_kernel_float4(float4 *array1_d, float4 *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 a = array1_d[tid];
      float4 b = array2_d[tid];
array1_d[tid] = make_float4(a.x*b.x,a.y*b.y,a.z*b.z,a.w*b.w);
}
}
/* *************************************************************** */
__global__ void reg_addArrays_kernel_float(float *array1_d, float *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array1_d[tid] += array2_d[tid];
}
}
/* *************************************************************** */
__global__ void reg_addArrays_kernel_float4(float4 *array1_d, float4 *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 a = array1_d[tid];
      float4 b = array2_d[tid];
array1_d[tid] = make_float4(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w);
}
}
/* *************************************************************** */
__global__ void reg_fillMaskArray_kernel(int *array1_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber)
array1_d[tid] = tid;
}
/* *************************************************************** */
#endif
|
22f7e9fe2b9c0d93ece0e918c0fcef4994f87b58.cu
|
/*
* _reg_tools_kernels.cu
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
* See the LICENSE.txt file in the nifty_reg root folder
*/
#ifndef _REG_TOOLS_KERNELS_CU
#define _REG_TOOLS_KERNELS_CU
/* *************************************************************** */
__device__ __constant__ int c_NodeNumber;
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int3 c_TargetImageDim;
__device__ __constant__ float3 c_VoxelNodeRatio;
__device__ __constant__ int3 c_ControlPointImageDim;
__device__ __constant__ int3 c_ImageDim;
__device__ __constant__ float c_Weight;
/* *************************************************************** */
texture<float4, 1, cudaReadModeElementType> controlPointTexture;
texture<float4, 1, cudaReadModeElementType> gradientImageTexture;
texture<float4, 1, cudaReadModeElementType> matrixTexture;
texture<float, 1, cudaReadModeElementType> convolutionKernelTexture;
/* *************************************************************** */
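/* The __constant__ symbols and textures above are expected to be filled in by
   host-side helpers (e.g. cudaMemcpyToSymbol and texture binding) before any of
   the kernels below is launched; those helpers live outside this file. */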
__global__ void reg_voxelCentric2NodeCentric_kernel(float4 *nodeNMIGradientArray_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_NodeNumber){
const int3 gridSize = c_ControlPointImageDim;
int tempIndex=tid;
const short z =(int)(tempIndex/(gridSize.x*gridSize.y));
tempIndex -= z*(gridSize.x)*(gridSize.y);
const short y =(int)(tempIndex/(gridSize.x));
const short x = tempIndex - y*(gridSize.x);
const float3 ratio = c_VoxelNodeRatio;
const short X = round((x-1)*ratio.x);
const short Y = round((y-1)*ratio.y);
const short Z = round((z-1)*ratio.z);
const int3 imageSize = c_TargetImageDim;
if(-1<X && X<imageSize.x && -1<Y && Y<imageSize.y && -1<Z && Z<imageSize.z){
int index = (Z*imageSize.y+Y)*imageSize.x+X;
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
nodeNMIGradientArray_d[tid] = make_float4(c_Weight*gradientValue.x,
c_Weight*gradientValue.y,
c_Weight*gradientValue.z,
0.0f);
}
else nodeNMIGradientArray_d[tid]=make_float4(0, 0.0f, 0.0f, 0.0f);
}
}
/* *************************************************************** */
__global__ void _reg_convertNMIGradientFromVoxelToRealSpace_kernel(float4 *gradient)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_NodeNumber){
float4 voxelGradient = gradient[tid];
float4 realGradient;
float4 matrix = tex1Dfetch(matrixTexture,0);
realGradient.x = matrix.x*voxelGradient.x + matrix.y*voxelGradient.y +
matrix.z*voxelGradient.z;
matrix = tex1Dfetch(matrixTexture,1);
realGradient.y = matrix.x*voxelGradient.x + matrix.y*voxelGradient.y +
matrix.z*voxelGradient.z;
matrix = tex1Dfetch(matrixTexture,2);
realGradient.z = matrix.x*voxelGradient.x + matrix.y*voxelGradient.y +
matrix.z*voxelGradient.z;
gradient[tid]=realGradient;
}
}
__global__ void _reg_ApplyConvolutionWindowAlongX_kernel( float4 *smoothedImage,
int windowSize)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
int3 imageSize = c_ImageDim;
int temp=tid;
const short z=(int)(temp/(imageSize.x*imageSize.y));
temp -= z*imageSize.x*imageSize.y;
const short y =(int)(temp/(imageSize.x));
short x = temp - y*(imageSize.x);
int radius = (windowSize-1)/2;
int index = tid - radius;
x -= radius;
float4 finalValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Kahan summation used here
float3 c=make_float3(0.f,0.f,0.f), Y, t;
float windowValue;
for(int i=0; i<windowSize; i++){
if(-1<x && x<imageSize.x){
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
windowValue = tex1Dfetch(convolutionKernelTexture,i);
Y.x = gradientValue.x * windowValue - c.x;
Y.y = gradientValue.y * windowValue - c.y;
Y.z = gradientValue.z * windowValue - c.z;
t.x = finalValue.x + Y.x;
t.y = finalValue.y + Y.y;
t.z = finalValue.z + Y.z;
c.x = (t.x - finalValue.x) - Y.x;
c.y = (t.y - finalValue.y) - Y.y;
c.z = (t.z - finalValue.z) - Y.z;
finalValue = make_float4(t.x, t.y, t.z, 0.f);
}
index++;
x++;
}
smoothedImage[tid] = finalValue;
}
return;
}
__global__ void _reg_ApplyConvolutionWindowAlongY_kernel(float4 *smoothedImage,
int windowSize)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
int3 imageSize = c_ImageDim;
const short z=(int)(tid/(imageSize.x*imageSize.y));
int index = tid - z*imageSize.x*imageSize.y;
short y=(int)(index/imageSize.x);
int radius = (windowSize-1)/2;
index = tid - imageSize.x*radius;
y -= radius;
float4 finalValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Kahan summation used here
float3 c=make_float3(0.f,0.f,0.f), Y, t;
float windowValue;
for(int i=0; i<windowSize; i++){
if(-1<y && y<imageSize.y){
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
windowValue = tex1Dfetch(convolutionKernelTexture,i);
Y.x = gradientValue.x * windowValue - c.x;
Y.y = gradientValue.y * windowValue - c.y;
Y.z = gradientValue.z * windowValue - c.z;
t.x = finalValue.x + Y.x;
t.y = finalValue.y + Y.y;
t.z = finalValue.z + Y.z;
c.x = (t.x - finalValue.x) - Y.x;
c.y = (t.y - finalValue.y) - Y.y;
c.z = (t.z - finalValue.z) - Y.z;
finalValue = make_float4(t.x, t.y, t.z, 0.f);
}
index += imageSize.x;
y++;
}
smoothedImage[tid] = finalValue;
}
return;
}
__global__ void _reg_ApplyConvolutionWindowAlongZ_kernel(float4 *smoothedImage,
int windowSize)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
int3 imageSize = c_ImageDim;
short z=(int)(tid/((imageSize.x)*(imageSize.y)));
int radius = (windowSize-1)/2;
int index = tid - imageSize.x*imageSize.y*radius;
z -= radius;
float4 finalValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Kahan summation used here
float3 c=make_float3(0.f,0.f,0.f), Y, t;
float windowValue;
for(int i=0; i<windowSize; i++){
if(-1<z && z<imageSize.z){
float4 gradientValue = tex1Dfetch(gradientImageTexture,index);
windowValue = tex1Dfetch(convolutionKernelTexture,i);
Y.x = gradientValue.x * windowValue - c.x;
Y.y = gradientValue.y * windowValue - c.y;
Y.z = gradientValue.z * windowValue - c.z;
t.x = finalValue.x + Y.x;
t.y = finalValue.y + Y.y;
t.z = finalValue.z + Y.z;
c.x = (t.x - finalValue.x) - Y.x;
c.y = (t.y - finalValue.y) - Y.y;
c.z = (t.z - finalValue.z) - Y.z;
finalValue = make_float4(t.x, t.y, t.z, 0.f);
}
index += imageSize.x*imageSize.y;
z++;
}
smoothedImage[tid] = finalValue;
}
return;
}
/* *************************************************************** */
__global__ void reg_multiplyValue_kernel_float(float *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array_d[tid] *= c_Weight;
}
}
/* *************************************************************** */
__global__ void reg_multiplyValue_kernel_float4(float4 *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 temp = array_d[tid];
array_d[tid] = make_float4(temp.x*c_Weight,temp.y*c_Weight,temp.z*c_Weight,temp.w*c_Weight);
}
}
/* *************************************************************** */
__global__ void reg_addValue_kernel_float(float *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array_d[tid] += c_Weight;
}
}
/* *************************************************************** */
__global__ void reg_addValue_kernel_float4(float4 *array_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 temp = array_d[tid];
array_d[tid] = make_float4(temp.x+c_Weight,temp.y+c_Weight,temp.z+c_Weight,temp.w+c_Weight);
}
}
/* *************************************************************** */
__global__ void reg_multiplyArrays_kernel_float(float *array1_d, float *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array1_d[tid] *= array2_d[tid];
}
}
/* *************************************************************** */
__global__ void reg_multiplyArrays_kernel_float4(float4 *array1_d, float4 *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 a = array1_d[tid];
      float4 b = array2_d[tid];
array1_d[tid] = make_float4(a.x*b.x,a.y*b.y,a.z*b.z,a.w*b.w);
}
}
/* *************************************************************** */
__global__ void reg_addArrays_kernel_float(float *array1_d, float *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
array1_d[tid] += array2_d[tid];
}
}
/* *************************************************************** */
__global__ void reg_addArrays_kernel_float4(float4 *array1_d, float4 *array2_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber){
float4 a = array1_d[tid];
      float4 b = array2_d[tid];
array1_d[tid] = make_float4(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w);
}
}
/* *************************************************************** */
__global__ void reg_fillMaskArray_kernel(int *array1_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid < c_VoxelNumber)
array1_d[tid] = tid;
}
/* *************************************************************** */
#endif
|
0307e346f2e9e9f0407f90878886664a7a6cc28f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Architektura procesoru (ACH 2018)
* Projekt c.2 (CUDA)
* Login: xpawlu00
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
__global__ void calculate_velocity(t_particles p_in, t_particles p_out, int N, float dt) {
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ float4 shM[];
    // vel is the variable into which the computed contributions are accumulated
float4 vel = {0.0f, 0.0f, 0.0f, 0.0f};
float4* shM_pos = shM;
float4* shM_vel = shM + blockDim.x;
float r;
float4 d;
float4 p1_pos, p1_vel;
if (idx < N) {
p1_pos = p_in.pos[idx];
p1_vel = p_in.vel[idx];
}
    // number of "tiles" == number of blocks
for (int tile = 0; tile < gridDim.x; ++tile) {
int blockTile = blockDim.x * tile;
int index = blockTile + threadIdx.x;
        // load the "tile" into shared memory - even threads whose idx > N must take part here!
if (index < N) {
shM_pos[threadIdx.x] = p_in.pos[index];
shM_vel[threadIdx.x] = p_in.vel[index];
}
__syncthreads();
if (idx < N) {
int limit = (tile + 1) * blockDim.x > N ? (N - blockTile) : blockDim.x;
for (int i = 0; i < limit; i++) {
                // the same element
if (idx == blockTile + i)
continue;
float4 p2_pos = shM_pos[i];
float4 p2_vel = shM_vel[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > 0.0f && r < COLLISION_DISTANCE) {
float weight = p1_vel.w / p2_vel.w;
vel.x += (((weight - 1) * p1_vel.x + 2 * p2_vel.x)/(1 + weight) - p1_vel.x);
vel.y += (((weight - 1) * p1_vel.y + 2 * p2_vel.y)/(1 + weight) - p1_vel.y);
vel.z += (((weight - 1) * p1_vel.z + 2 * p2_vel.z)/(1 + weight) - p1_vel.z);
}
if (r > COLLISION_DISTANCE) {
vel.x += (G * p2_vel.w) / pow(r, 3) * (d.x) * dt;
vel.y += (G * p2_vel.w) / pow(r, 3) * (d.y) * dt;
vel.z += (G * p2_vel.w) / pow(r, 3) * (d.z) * dt;
}
}
}
__syncthreads();
}
if (idx < N) {
float4 tmp_vel = {p1_vel.x + vel.x, p1_vel.y + vel.y, p1_vel.z + vel.z, p1_vel.w};
p_out.vel[idx] = tmp_vel;
p_out.pos[idx].x = p1_pos.x + tmp_vel.x * dt;
p_out.pos[idx].y = p1_pos.y + tmp_vel.y * dt;
p_out.pos[idx].z = p1_pos.z + tmp_vel.z * dt;
}
}
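/* Launch sketch (illustrative; the block size is an assumption). The kernel
 * needs dynamic shared memory for one tile of positions plus one tile of
 * velocities, i.e. 2 * blockDim.x * sizeof(float4) bytes:
 *
 *   int threads = 128;
 *   int blocks  = (N + threads - 1) / threads;
 *   size_t shm  = 2 * threads * sizeof(float4);
 *   hipLaunchKernelGGL(calculate_velocity, dim3(blocks), dim3(threads), shm, 0,
 *                      p_in, p_out, N, dt);
 */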
__global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float r;
float4 d;
float4 p1_pos = p.pos[idx];
float4 vel = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > COLLISION_DISTANCE) {
float p2_weight = p.vel[i].w;
vel.x += (G * p2_weight) / pow(r, 3) * (d.x) * dt;
vel.y += (G * p2_weight) / pow(r, 3) * (d.y) * dt;
vel.z += (G * p2_weight) / pow(r, 3) * (d.z) * dt;
}
}
tmp_vel.vel[idx] = vel;
}
__global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float4 d;
float r;
float4 p1_pos = p.pos[idx];
float4 tmp_vel_local = tmp_vel.vel[idx];
float4 p1_vel = p.vel[idx];
float weight1 = p1_vel.w;
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
float4 p2_vel = p.vel[i];
float weight2 = p2_vel.w;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > 0.0f && r < COLLISION_DISTANCE) {
float weight = weight1 / weight2;
tmp_vel_local.x += (((weight - 1) * p1_vel.x + 2 * p2_vel.x)/(1 + weight) - p1_vel.x);
tmp_vel_local.y += (((weight - 1) * p1_vel.y + 2 * p2_vel.y)/(1 + weight) - p1_vel.y);
tmp_vel_local.z += (((weight - 1) * p1_vel.z + 2 * p2_vel.z)/(1 + weight) - p1_vel.z);
}
}
tmp_vel.vel[idx] = tmp_vel_local;
}
__global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
p.vel[idx].x += tmp_vel.vel[idx].x;
p.vel[idx].y += tmp_vel.vel[idx].y;
p.vel[idx].z += tmp_vel.vel[idx].z;
p.pos[idx].x += p.vel[idx].x * dt;
p.pos[idx].y += p.vel[idx].y * dt;
p.pos[idx].z += p.vel[idx].z * dt;
}
__host__ void particles_read(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++)
{
fscanf(fp, "%f %f %f %f %f %f %f \n",
&p.pos[i].x, &p.pos[i].y, &p.pos[i].z,
&p.vel[i].x, &p.vel[i].y, &p.vel[i].z, &p.vel[i].w);
}
}
__host__ void particles_write(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++)
{
fprintf(fp, "%f %f %f %f %f %f %f \n",
p.pos[i].x, p.pos[i].y, p.pos[i].z,
p.vel[i].x, p.vel[i].y, p.vel[i].z, p.vel[i].w);
}
}
|
0307e346f2e9e9f0407f90878886664a7a6cc28f.cu
|
/*
* Architektura procesoru (ACH 2018)
* Projekt c.2 (CUDA)
* Login: xpawlu00
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
__global__ void calculate_velocity(t_particles p_in, t_particles p_out, int N, float dt) {
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ float4 shM[];
    // vel is the variable into which the computed contributions are accumulated
float4 vel = {0.0f, 0.0f, 0.0f, 0.0f};
float4* shM_pos = shM;
float4* shM_vel = shM + blockDim.x;
float r;
float4 d;
float4 p1_pos, p1_vel;
if (idx < N) {
p1_pos = p_in.pos[idx];
p1_vel = p_in.vel[idx];
}
    // number of "tiles" == number of blocks
for (int tile = 0; tile < gridDim.x; ++tile) {
int blockTile = blockDim.x * tile;
int index = blockTile + threadIdx.x;
        // load the "tile" into shared memory - even threads whose idx > N must take part here!
if (index < N) {
shM_pos[threadIdx.x] = p_in.pos[index];
shM_vel[threadIdx.x] = p_in.vel[index];
}
__syncthreads();
if (idx < N) {
int limit = (tile + 1) * blockDim.x > N ? (N - blockTile) : blockDim.x;
for (int i = 0; i < limit; i++) {
                // the same element
if (idx == blockTile + i)
continue;
float4 p2_pos = shM_pos[i];
float4 p2_vel = shM_vel[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > 0.0f && r < COLLISION_DISTANCE) {
float weight = p1_vel.w / p2_vel.w;
vel.x += (((weight - 1) * p1_vel.x + 2 * p2_vel.x)/(1 + weight) - p1_vel.x);
vel.y += (((weight - 1) * p1_vel.y + 2 * p2_vel.y)/(1 + weight) - p1_vel.y);
vel.z += (((weight - 1) * p1_vel.z + 2 * p2_vel.z)/(1 + weight) - p1_vel.z);
}
if (r > COLLISION_DISTANCE) {
vel.x += (G * p2_vel.w) / pow(r, 3) * (d.x) * dt;
vel.y += (G * p2_vel.w) / pow(r, 3) * (d.y) * dt;
vel.z += (G * p2_vel.w) / pow(r, 3) * (d.z) * dt;
}
}
}
__syncthreads();
}
if (idx < N) {
float4 tmp_vel = {p1_vel.x + vel.x, p1_vel.y + vel.y, p1_vel.z + vel.z, p1_vel.w};
p_out.vel[idx] = tmp_vel;
p_out.pos[idx].x = p1_pos.x + tmp_vel.x * dt;
p_out.pos[idx].y = p1_pos.y + tmp_vel.y * dt;
p_out.pos[idx].z = p1_pos.z + tmp_vel.z * dt;
}
}
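/* Launch sketch (illustrative; the block size is an assumption). The kernel
 * needs dynamic shared memory for one tile of positions plus one tile of
 * velocities, i.e. 2 * blockDim.x * sizeof(float4) bytes:
 *
 *   int threads = 128;
 *   int blocks  = (N + threads - 1) / threads;
 *   size_t shm  = 2 * threads * sizeof(float4);
 *   calculate_velocity<<<blocks, threads, shm>>>(p_in, p_out, N, dt);
 */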
__global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float r;
float4 d;
float4 p1_pos = p.pos[idx];
float4 vel = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > COLLISION_DISTANCE) {
float p2_weight = p.vel[i].w;
vel.x += (G * p2_weight) / pow(r, 3) * (d.x) * dt;
vel.y += (G * p2_weight) / pow(r, 3) * (d.y) * dt;
vel.z += (G * p2_weight) / pow(r, 3) * (d.z) * dt;
}
}
tmp_vel.vel[idx] = vel;
}
__global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float4 d;
float r;
float4 p1_pos = p.pos[idx];
float4 tmp_vel_local = tmp_vel.vel[idx];
float4 p1_vel = p.vel[idx];
float weight1 = p1_vel.w;
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
float4 p2_vel = p.vel[i];
float weight2 = p2_vel.w;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > 0.0f && r < COLLISION_DISTANCE) {
float weight = weight1 / weight2;
tmp_vel_local.x += (((weight - 1) * p1_vel.x + 2 * p2_vel.x)/(1 + weight) - p1_vel.x);
tmp_vel_local.y += (((weight - 1) * p1_vel.y + 2 * p2_vel.y)/(1 + weight) - p1_vel.y);
tmp_vel_local.z += (((weight - 1) * p1_vel.z + 2 * p2_vel.z)/(1 + weight) - p1_vel.z);
}
}
tmp_vel.vel[idx] = tmp_vel_local;
}
__global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
p.vel[idx].x += tmp_vel.vel[idx].x;
p.vel[idx].y += tmp_vel.vel[idx].y;
p.vel[idx].z += tmp_vel.vel[idx].z;
p.pos[idx].x += p.vel[idx].x * dt;
p.pos[idx].y += p.vel[idx].y * dt;
p.pos[idx].z += p.vel[idx].z * dt;
}
__host__ void particles_read(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++)
{
fscanf(fp, "%f %f %f %f %f %f %f \n",
&p.pos[i].x, &p.pos[i].y, &p.pos[i].z,
&p.vel[i].x, &p.vel[i].y, &p.vel[i].z, &p.vel[i].w);
}
}
__host__ void particles_write(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++)
{
fprintf(fp, "%f %f %f %f %f %f %f \n",
p.pos[i].x, p.pos[i].y, p.pos[i].z,
p.vel[i].x, p.vel[i].y, p.vel[i].z, p.vel[i].w);
}
}
|
3dd2ed7ac96aa1863437be61ce7ec08abeba28c5.hip
|
// !!! This is a file automatically generated by hipify!!!
/***********************************************
* # Copyright 2009. Liu Yongchao
* # Contact: Liu Yongchao
* # [email protected]; [email protected]
* #
* # GPL 2.0 applies.
* #
* ************************************************/
#include "GenericFunction.h"
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#define CUERR do{ hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
int device; \
hipGetDevice(&device); \
printf("CUDA error on GPU %d: %s : %s, line %d\n", device, hipGetErrorString(err), __FILE__, __LINE__); }}while(0);
static const enum hipMemcpyKind kinds[] = { hipMemcpyHostToDevice,
hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice };
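/* The copy wrappers below take a plain int 'kind' and map it through this
   table: 0 = host-to-device, 1 = device-to-host, 2 = device-to-device, so
   callers do not need the runtime enum values. */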
GPUInfo* gpuInfo = 0;
GPUInfo* pInitDevice(int argc, char* argv[]) {
int i;
gpuInfo = (GPUInfo*) malloc(sizeof(GPUInfo));
if (!gpuInfo) {
fprintf(stderr, "memory allocation failed\n");
exit(-1);
}
//get the number of CUDA-enabled GPUs
gpuInfo->n_device = 0;
hipGetDeviceCount(&gpuInfo->n_device);
CUERR
if (gpuInfo->n_device <= 0) {
		fprintf(stderr, "There is no CUDA-enabled device available\n");
exit(-1);
}
gpuInfo->devices = (int*) malloc(sizeof(int) * gpuInfo->n_device);
gpuInfo->props = (hipDeviceProp_t*) malloc(
sizeof(hipDeviceProp_t) * gpuInfo->n_device);
int realDevice = 0;
for (i = 0; i < gpuInfo->n_device; i++) {
gpuInfo->devices[realDevice] = i;
hipGetDeviceProperties(&gpuInfo->props[realDevice], i);
CUERR
/*check the compute capability*/
if (gpuInfo->props[realDevice].regsPerBlock < 16384
|| gpuInfo->props[realDevice].major < 3) {
continue;
}
realDevice++;
}
gpuInfo->n_device = realDevice;
return gpuInfo;
}
void pExitDevice(GPUInfo* info) {
if (!info)
return;
if (info->devices)
free(info->devices);
if (info->props)
free(info->props);
}
GPUInfo* pGetGPUInfo() {
return gpuInfo;
}
void printGPUInfo(GPUInfo* gpuInfo)
{
for(int realDevice = 0; realDevice < gpuInfo->n_device; ++realDevice)
{
fprintf(stderr, "\n---------device(%d)-------------\n", realDevice);
fprintf(stderr, "name:%s\n", gpuInfo->props[realDevice].name);
fprintf(stderr, "multiprocessor count:%d\n",
gpuInfo->props[realDevice].multiProcessorCount);
		fprintf(stderr, "clock rate:%d kHz\n",
gpuInfo->props[realDevice].clockRate);
fprintf(stderr, "shared memory:%ld\n",
gpuInfo->props[realDevice].sharedMemPerBlock);
fprintf(stderr, "global memory:%ld\n",
gpuInfo->props[realDevice].totalGlobalMem);
fprintf(stderr, "registers per block:%d\n",
gpuInfo->props[realDevice].regsPerBlock);
fprintf(stderr, "Compute capability: %d.%d\n", gpuInfo->props[realDevice].major,
gpuInfo->props[realDevice].minor);
fprintf(stderr, "L2 cache size: %d\n",
gpuInfo->props[realDevice].l2CacheSize);
/*calculated by MAX_TEXTURE_CACHE / (25 * 25 * sizeof(short))*/
fprintf(stderr, "Max Query Length for Query Profile Variant: %d\n",
(int)(gpuInfo->props[realDevice].l2CacheSize / 1250));
}
fprintf(stderr, "Only %d devices with compute capability >= 3.0\n",
gpuInfo->n_device);
}
void pSetDevice(GPUInfo* info, int dev) {
hipSetDevice(dev);
CUERR
}
float pGetClockRate(GPUInfo* info, int dev) {
float frequency = info->props[dev].clockRate;
frequency /= 1000000;
return frequency; /*in GHz*/
}
int pGetMultiProcessorCount(GPUInfo* info, int dev) {
return info->props[dev].multiProcessorCount;
}
int pGetRegistersPerBlock(GPUInfo* info, int dev) {
return info->props[dev].regsPerBlock;
}
int pGetL2CacheSize(GPUInfo* info, int dev) {
return info->props[dev].l2CacheSize;
}
void* pMallocHost(size_t size) {
void* host;
#ifndef UNIX_EMU
hipHostMalloc(&host, size);
#else
host = malloc(size);
#endif
CUERR
return host;
}
void pFreeHost(void*host) {
#ifndef UNIX_EMU
hipHostFree(host);
#else
if(host) free(host);
#endif
CUERR
}
void* pMallocPitch(size_t block_size, size_t width, size_t height,
size_t* pitch) {
void* device;
size_t devPitch;
if (!pitch) {
pitch = &devPitch;
}
hipMallocPitch((void**) &device, pitch, block_size * width, height);
CUERR
return device;
}
void pFree(void*device) {
hipFree(device);
CUERR
}
void pFreeArray(void*array) {
hipFreeArray((hipArray*) array);
CUERR
}
void pMemcpy(void*dst, const void* src, size_t count, int kind) {
hipMemcpy(dst, src, count, kinds[kind]);
CUERR
}
void pMemcpy2D(void* dst, size_t dpitch, const void* src, size_t spitch,
size_t width, size_t height, int kind) {
hipMemcpy2D(dst, dpitch, src, spitch, width, height, kinds[kind]);
CUERR
}
void pMemcpyToArray(void*dst, int x, int y, const void* src, size_t count,
int kind) {
hipMemcpyToArray((hipArray*) dst, x, y, src, count, kinds[kind]);
CUERR
}
void pMemcpy2DToArray(void* dst, int dstx, int dsty, void*src, size_t src_pitch,
size_t width, size_t height, int kind) {
hipMemcpy2DToArray((hipArray*) dst, dstx, dsty, src, src_pitch, width,
height, kinds[kind]);
CUERR
}
void pMemcpy2DFromArray(void*dst, size_t pitch, void*src, size_t srcx,
size_t srcy, size_t width, size_t height, int kind) {
hipMemcpy2DFromArray(dst, pitch, (hipArray*) src, srcx, srcy, width,
height, kinds[kind]);
CUERR
}
|
3dd2ed7ac96aa1863437be61ce7ec08abeba28c5.cu
|
/***********************************************
* # Copyright 2009. Liu Yongchao
* # Contact: Liu Yongchao
* # [email protected]; [email protected]
* #
* # GPL 2.0 applies.
* #
* ************************************************/
#include "GenericFunction.h"
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#define CUERR do{ cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
int device; \
cudaGetDevice(&device); \
printf("CUDA error on GPU %d: %s : %s, line %d\n", device, cudaGetErrorString(err), __FILE__, __LINE__); }}while(0);
static const enum cudaMemcpyKind kinds[] = { cudaMemcpyHostToDevice,
cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice };
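/* The copy wrappers below take a plain int 'kind' and map it through this
   table: 0 = host-to-device, 1 = device-to-host, 2 = device-to-device, so
   callers do not need the runtime enum values. */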
GPUInfo* gpuInfo = 0;
GPUInfo* pInitDevice(int argc, char* argv[]) {
int i;
gpuInfo = (GPUInfo*) malloc(sizeof(GPUInfo));
if (!gpuInfo) {
fprintf(stderr, "memory allocation failed\n");
exit(-1);
}
//get the number of CUDA-enabled GPUs
gpuInfo->n_device = 0;
cudaGetDeviceCount(&gpuInfo->n_device);
CUERR
if (gpuInfo->n_device <= 0) {
		fprintf(stderr, "There is no CUDA-enabled device available\n");
exit(-1);
}
gpuInfo->devices = (int*) malloc(sizeof(int) * gpuInfo->n_device);
gpuInfo->props = (cudaDeviceProp*) malloc(
sizeof(cudaDeviceProp) * gpuInfo->n_device);
int realDevice = 0;
for (i = 0; i < gpuInfo->n_device; i++) {
gpuInfo->devices[realDevice] = i;
cudaGetDeviceProperties(&gpuInfo->props[realDevice], i);
CUERR
/*check the compute capability*/
if (gpuInfo->props[realDevice].regsPerBlock < 16384
|| gpuInfo->props[realDevice].major < 3) {
continue;
}
realDevice++;
}
gpuInfo->n_device = realDevice;
return gpuInfo;
}
void pExitDevice(GPUInfo* info) {
if (!info)
return;
if (info->devices)
free(info->devices);
if (info->props)
free(info->props);
}
GPUInfo* pGetGPUInfo() {
return gpuInfo;
}
void printGPUInfo(GPUInfo* gpuInfo)
{
for(int realDevice = 0; realDevice < gpuInfo->n_device; ++realDevice)
{
fprintf(stderr, "\n---------device(%d)-------------\n", realDevice);
fprintf(stderr, "name:%s\n", gpuInfo->props[realDevice].name);
fprintf(stderr, "multiprocessor count:%d\n",
gpuInfo->props[realDevice].multiProcessorCount);
fprintf(stderr, "clock rate:%d MHz\n",
gpuInfo->props[realDevice].clockRate);
fprintf(stderr, "shared memory:%ld\n",
gpuInfo->props[realDevice].sharedMemPerBlock);
fprintf(stderr, "global memory:%ld\n",
gpuInfo->props[realDevice].totalGlobalMem);
fprintf(stderr, "registers per block:%d\n",
gpuInfo->props[realDevice].regsPerBlock);
fprintf(stderr, "Compute capability: %d.%d\n", gpuInfo->props[realDevice].major,
gpuInfo->props[realDevice].minor);
fprintf(stderr, "L2 cache size: %d\n",
gpuInfo->props[realDevice].l2CacheSize);
/*calculated by MAX_TEXTURE_CACHE / (25 * 25 * sizeof(short))*/
fprintf(stderr, "Max Query Length for Query Profile Variant: %d\n",
(int)(gpuInfo->props[realDevice].l2CacheSize / 1250));
}
fprintf(stderr, "Only %d devices with compute capability >= 3.0\n",
gpuInfo->n_device);
}
void pSetDevice(GPUInfo* info, int dev) {
cudaSetDevice(dev);
CUERR
}
float pGetClockRate(GPUInfo* info, int dev) {
float frequency = info->props[dev].clockRate;
frequency /= 1000000;
return frequency; /*in GHz*/
}
int pGetMultiProcessorCount(GPUInfo* info, int dev) {
return info->props[dev].multiProcessorCount;
}
int pGetRegistersPerBlock(GPUInfo* info, int dev) {
return info->props[dev].regsPerBlock;
}
int pGetL2CacheSize(GPUInfo* info, int dev) {
return info->props[dev].l2CacheSize;
}
void* pMallocHost(size_t size) {
void* host;
#ifndef UNIX_EMU
cudaMallocHost(&host, size);
#else
host = malloc(size);
#endif
CUERR
return host;
}
void pFreeHost(void*host) {
#ifndef UNIX_EMU
cudaFreeHost(host);
#else
if(host) free(host);
#endif
CUERR
}
void* pMallocPitch(size_t block_size, size_t width, size_t height,
size_t* pitch) {
void* device;
size_t devPitch;
if (!pitch) {
pitch = &devPitch;
}
cudaMallocPitch((void**) &device, pitch, block_size * width, height);
CUERR
return device;
}
void pFree(void*device) {
cudaFree(device);
CUERR
}
void pFreeArray(void*array) {
cudaFreeArray((cudaArray*) array);
CUERR
}
void pMemcpy(void*dst, const void* src, size_t count, int kind) {
cudaMemcpy(dst, src, count, kinds[kind]);
CUERR
}
void pMemcpy2D(void* dst, size_t dpitch, const void* src, size_t spitch,
size_t width, size_t height, int kind) {
cudaMemcpy2D(dst, dpitch, src, spitch, width, height, kinds[kind]);
CUERR
}
void pMemcpyToArray(void*dst, int x, int y, const void* src, size_t count,
int kind) {
cudaMemcpyToArray((cudaArray*) dst, x, y, src, count, kinds[kind]);
CUERR
}
void pMemcpy2DToArray(void* dst, int dstx, int dsty, void*src, size_t src_pitch,
size_t width, size_t height, int kind) {
cudaMemcpy2DToArray((cudaArray*) dst, dstx, dsty, src, src_pitch, width,
height, kinds[kind]);
CUERR
}
void pMemcpy2DFromArray(void*dst, size_t pitch, void*src, size_t srcx,
size_t srcy, size_t width, size_t height, int kind) {
cudaMemcpy2DFromArray(dst, pitch, (cudaArray*) src, srcx, srcy, width,
height, kinds[kind]);
CUERR
}
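/* A minimal usage sketch of the wrappers in this file (illustrative only; buffer
 * sizes and the chosen device index are arbitrary assumptions):
 *
 *   GPUInfo* info = pInitDevice(argc, argv);
 *   printGPUInfo(info);
 *   pSetDevice(info, 0);
 *   void* h_buf = pMallocHost(1024);
 *   size_t pitch;
 *   void* d_buf = pMallocPitch(sizeof(char), 1024, 1, &pitch);
 *   pMemcpy(d_buf, h_buf, 1024, 0);   // kind 0: host-to-device
 *   pMemcpy(h_buf, d_buf, 1024, 1);   // kind 1: device-to-host
 *   pFree(d_buf);
 *   pFreeHost(h_buf);
 *   pExitDevice(info);
 */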
|
c6f93c8d9a369a3e04feaa82723efb05f3d64093.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define M 50
#define tpb 256
#define bpg 1
__device__
bool is_same_block(int element_i, int other_i) {
return ((element_i + 10) / 10 == (other_i + 10) / 10);
}
__global__
void modify(int n, int *vector) {
__shared__ int s[M];
int i = threadIdx.x;
if (i < n) {
s[i] = i * 10 + 123 - 456;
__syncthreads();
if (i + 1 < n) {
int val = s[(i + 1)];
vector[i] = val;
} else {
vector[i] = 0;
}
}
}
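/* Each active thread copies its right neighbor's shared-memory value into the output
 * (the last active thread writes 0). Note that tpb (256) exceeds M (50), so threads
 * with i >= n never reach the __syncthreads() inside the branch. */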
int main() {
int *vector, *d_vector;
vector = new int[M];
for (int i = 0; i < M; i += 1) {
vector[i] = 0;
}
hipMalloc(&d_vector, M * sizeof(int));
hipMemcpy(d_vector, vector, M * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( modify), dim3(bpg), dim3(tpb), 0, 0, M, d_vector);
hipMemcpy(vector, d_vector, M * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < M; i += 1) {
printf("%d\t", vector[i]);
}
hipFree(d_vector);
delete[] vector;
hipDeviceReset();
return 0;
}
|
c6f93c8d9a369a3e04feaa82723efb05f3d64093.cu
|
#include <iostream>
#define M 50
#define tpb 256
#define bpg 1
__device__
bool is_same_block(int element_i, int other_i) {
return ((element_i + 10) / 10 == (other_i + 10) / 10);
}
__global__
void modify(int n, int *vector) {
__shared__ int s[M];
int i = threadIdx.x;
if (i < n) {
s[i] = i * 10 + 123 - 456;
__syncthreads();
if (i + 1 < n) {
int val = s[(i + 1)];
vector[i] = val;
} else {
vector[i] = 0;
}
}
}
int main() {
int *vector, *d_vector;
vector = new int[M];
for (int i = 0; i < M; i += 1) {
vector[i] = 0;
}
cudaMalloc(&d_vector, M * sizeof(int));
cudaMemcpy(d_vector, vector, M * sizeof(int), cudaMemcpyHostToDevice);
modify<<<bpg, tpb>>>(M, d_vector);
cudaMemcpy(vector, d_vector, M * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < M; i += 1) {
printf("%d\t", vector[i]);
}
cudaFree(d_vector);
delete[] vector;
cudaDeviceReset();
return 0;
}
|
d759e3a73597d9a84023c897c786f4206c529bef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm> // perf stats
#include <cstring>
#include <iomanip>
#include <iostream>
#include <numeric> // perf stats
#include <unistd.h>
#include <vector>
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include "rambo.h"
#include "timer.h"
#define gpuErrchk3(ans) \
{ gpuAssert3((ans), __FILE__, __LINE__); }
inline void gpuAssert3(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
}
}
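// Note: gpuAssert3 only prints the error; the 'abort' parameter is accepted but not acted upon.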
#define TIMERTYPE std::chrono::high_resolution_clock
bool is_number(const char *s) {
const char *t = s;
while (*t != '\0' && isdigit(*t))
++t;
return strlen(s) == t - s;
}
int usage(char* argv0, int ret = 1) {
std::cout << "Usage: " << argv0
<< " [--verbose|-v] [--debug|-d] [--performance|-p]"
<< " [#gpuBlocksPerGrid #gpuThreadsPerBlock] #iterations" << std::endl;
return ret;
}
int main(int argc, char **argv) {
bool verbose = false, debug = false, perf = false;
int numiter = 0, gpublocks = 1, gputhreads = 1;
std::vector<int> numvec;
Timer<TIMERTYPE> timer;
std::vector<float> wavetimes;
for (int argn = 1; argn < argc; ++argn) {
if (strcmp(argv[argn], "--verbose") == 0 || strcmp(argv[argn], "-v") == 0)
verbose = true;
else if (strcmp(argv[argn], "--debug") == 0 ||
strcmp(argv[argn], "-d") == 0)
debug = true;
else if (strcmp(argv[argn], "--performance") == 0 ||
strcmp(argv[argn], "-p") == 0)
perf = true;
else if (is_number(argv[argn]))
numvec.push_back(atoi(argv[argn]));
else
return usage(argv[0]);
}
int veclen = numvec.size();
if (veclen == 3) {
gpublocks = numvec[0];
gputhreads = numvec[1];
numiter = numvec[2];
} else if (veclen == 1) {
numiter = numvec[0];
} else {
return usage(argv[0]);
}
if (numiter == 0)
return usage(argv[0]);
hipFree(0);
if (verbose)
std::cout << "# iterations: " << numiter << std::endl;
// Create a process object
CPPProcess process(numiter, gpublocks, gputhreads, verbose, debug);
// Read param_card and set parameters
process.initProc("../../Cards/param_card.dat");
double energy = 1500;
double weight;
int meGeVexponent = -(2 * process.nexternal - 8);
int dim = gpublocks * gputhreads;
// Local Memory
//typedef double arr_t[6][4];
double* lp = new double[6*3*dim];
double* meHostPtr = new double[dim*1];
double *meDevPtr =0;
int num_bytes_back = 1 * dim * sizeof(double);
hipMalloc((void**)&meDevPtr, num_bytes_back);
std::vector<double> matrixelementvector;
for (int x = 0; x < numiter; ++x) {
// Get phase space point
std::vector<std::vector<double *>> p =
get_momenta(process.ninitial, energy, process.getMasses(), weight, dim);
// Set momenta for this event
for (int d = 0; d < dim; ++d) {
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 3; ++j) {
lp[i*dim*3+j*dim+d] = p[d][i][1+j];
}
}
}
//new
int num_bytes = 3*6*dim * sizeof(double);
double *allmomenta = 0;
hipMalloc((void**)&allmomenta, num_bytes);
hipMemcpy(allmomenta,lp,num_bytes,hipMemcpyHostToDevice);
//gpuErrchk3(hipMemcpy3D(&tdp));
//process.preSigmaKin();
if (perf) {
timer.Start();
}
// Evaluate matrix element
// later process.sigmaKin(ncomb, goodhel, ntry, sum_hel, ngood, igood,
// jhel);
hipLaunchKernelGGL(( sigmaKin), dim3(gpublocks), dim3(gputhreads), 0, 0, allmomenta, meDevPtr);//, debug, verbose);
gpuErrchk3( hipPeekAtLastError() );
//gpuErrchk3(hipMemcpy2D(meHostPtr, sizeof(double), meDevPtr, mePitch,
// sizeof(double), dim, hipMemcpyDeviceToHost));
hipMemcpy(meHostPtr, meDevPtr, 1 * dim*sizeof(double), hipMemcpyDeviceToHost);
if (verbose)
std::cout << "***********************************" << std::endl
<< "Iteration #" << x+1 << " of " << numiter << std::endl;
if (perf) {
float gputime = timer.GetDuration();
wavetimes.push_back(gputime);
if (verbose)
std::cout << "Wave function time: " << gputime << std::endl;
}
if (verbose || perf) {
for (int d = 0; d < dim; ++d) {
if (verbose) {
std::cout << "Momenta:" << std::endl;
for (int i = 0; i < process.nexternal; i++)
std::cout << std::setw(4) << i + 1
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][0] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][1]
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][2] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][3] << std::endl;
std::cout << std::string(80, '-') << std::endl;
}
// Display matrix elements
for (int i = 0; i < process.nprocesses; i++) {
if (verbose)
std::cout << " Matrix element = "
// << setiosflags(ios::fixed) << setprecision(17)
<< meHostPtr[i*1 + d] << " GeV^" << meGeVexponent << std::endl;
if (perf)
matrixelementvector.push_back(meHostPtr[i*1 + d]);
}
if (verbose)
std::cout << std::string(80, '-') << std::endl;
}
} else if (!debug) {
std::cout << ".";
}
for (std::vector<std::vector<double *>>::iterator it = p.begin();
it != p.end(); ++it) {
for (std::vector<double *>::iterator jt = it->begin(); jt != it->end();
++jt) {
delete[] & (**jt);
}
}
hipFree(allmomenta);
}
if (!(verbose || debug || perf)) {
std::cout << std::endl;
}
if (perf) {
float sum = std::accumulate(wavetimes.begin(), wavetimes.end(), 0.0);
int num_wts = wavetimes.size();
float mean = sum / num_wts;
float sq_sum = std::inner_product(wavetimes.begin(), wavetimes.end(),
wavetimes.begin(), 0.0);
float stdev = std::sqrt(sq_sum / num_wts - mean * mean);
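// mean and (population) standard deviation of the per-iteration kernel times via Var = E[t^2] - (E[t])^2;
// the same identity is reused below for the matrix-element statistics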
std::vector<float>::iterator mintime =
std::min_element(wavetimes.begin(), wavetimes.end());
std::vector<float>::iterator maxtime =
std::max_element(wavetimes.begin(), wavetimes.end());
int num_mes = matrixelementvector.size();
float sumelem = std::accumulate(matrixelementvector.begin(), matrixelementvector.end(), 0.0);
float meanelem = sumelem / num_mes;
float sqselem = std::inner_product(matrixelementvector.begin(), matrixelementvector.end(),
matrixelementvector.begin(), 0.0);
float stdelem = std::sqrt(sqselem / num_mes - meanelem * meanelem);
std::vector<double>::iterator maxelem = std::max_element(
matrixelementvector.begin(), matrixelementvector.end());
std::vector<double>::iterator minelem = std::min_element(
matrixelementvector.begin(), matrixelementvector.end());
std::cout << "***********************************" << std::endl
<< "NumIterations = " << numiter << std::endl
<< "NumThreadsPerBlock = " << gputhreads << std::endl
<< "NumBlocksPerGrid = " << gpublocks << std::endl
<< "-----------------------------------" << std::endl
<< "NumberOfEntries = " << num_wts << std::endl
<< std::scientific
<< "TotalTimeInWaveFuncs = " << sum << " sec" << std::endl
<< "MeanTimeInWaveFuncs = " << mean << " sec" << std::endl
<< "StdDevTimeInWaveFuncs = " << stdev << " sec" << std::endl
<< "MinTimeInWaveFuncs = " << *mintime << " sec" << std::endl
<< "MaxTimeInWaveFuncs = " << *maxtime << " sec" << std::endl
<< "-----------------------------------" << std::endl
<< "ProcessID: = " << getpid() << std::endl
<< "NProcesses = " << process.nprocesses << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< "MatrixElementsPerSec = " << num_mes/sum << " sec^-1" << std::endl;
std::cout << "***********************************" << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< std::scientific
<< "MeanMatrixElemValue = " << meanelem << " GeV^" << meGeVexponent << std::endl
<< "StdErrMatrixElemValue = " << stdelem/sqrt(num_mes) << " GeV^" << meGeVexponent << std::endl
<< "StdDevMatrixElemValue = " << stdelem << " GeV^" << meGeVexponent << std::endl
<< "MinMatrixElemValue = " << *minelem << " GeV^" << meGeVexponent << std::endl
<< "MaxMatrixElemValue = " << *maxelem << " GeV^" << meGeVexponent << std::endl;
}
delete[] lp;
}
|
d759e3a73597d9a84023c897c786f4206c529bef.cu
|
#include <algorithm> // perf stats
#include <cstring>
#include <iomanip>
#include <iostream>
#include <numeric> // perf stats
#include <unistd.h>
#include <vector>
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include "rambo.h"
#include "timer.h"
#define gpuErrchk3(ans) \
{ gpuAssert3((ans), __FILE__, __LINE__); }
inline void gpuAssert3(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
}
}
#define TIMERTYPE std::chrono::high_resolution_clock
bool is_number(const char *s) {
const char *t = s;
while (*t != '\0' && isdigit(*t))
++t;
return strlen(s) == t - s;
}
int usage(char* argv0, int ret = 1) {
std::cout << "Usage: " << argv0
<< " [--verbose|-v] [--debug|-d] [--performance|-p]"
<< " [#gpuBlocksPerGrid #gpuThreadsPerBlock] #iterations" << std::endl;
return ret;
}
int main(int argc, char **argv) {
bool verbose = false, debug = false, perf = false;
int numiter = 0, gpublocks = 1, gputhreads = 1;
std::vector<int> numvec;
Timer<TIMERTYPE> timer;
std::vector<float> wavetimes;
for (int argn = 1; argn < argc; ++argn) {
if (strcmp(argv[argn], "--verbose") == 0 || strcmp(argv[argn], "-v") == 0)
verbose = true;
else if (strcmp(argv[argn], "--debug") == 0 ||
strcmp(argv[argn], "-d") == 0)
debug = true;
else if (strcmp(argv[argn], "--performance") == 0 ||
strcmp(argv[argn], "-p") == 0)
perf = true;
else if (is_number(argv[argn]))
numvec.push_back(atoi(argv[argn]));
else
return usage(argv[0]);
}
int veclen = numvec.size();
if (veclen == 3) {
gpublocks = numvec[0];
gputhreads = numvec[1];
numiter = numvec[2];
} else if (veclen == 1) {
numiter = numvec[0];
} else {
return usage(argv[0]);
}
if (numiter == 0)
return usage(argv[0]);
cudaFree(0);
if (verbose)
std::cout << "# iterations: " << numiter << std::endl;
// Create a process object
CPPProcess process(numiter, gpublocks, gputhreads, verbose, debug);
// Read param_card and set parameters
process.initProc("../../Cards/param_card.dat");
double energy = 1500;
double weight;
int meGeVexponent = -(2 * process.nexternal - 8);
int dim = gpublocks * gputhreads;
// Local Memory
//typedef double arr_t[6][4];
double* lp = new double[6*3*dim];
double* meHostPtr = new double[dim*1];
double *meDevPtr =0;
int num_bytes_back = 1 * dim * sizeof(double);
cudaMalloc((void**)&meDevPtr, num_bytes_back);
std::vector<double> matrixelementvector;
for (int x = 0; x < numiter; ++x) {
// Get phase space point
std::vector<std::vector<double *>> p =
get_momenta(process.ninitial, energy, process.getMasses(), weight, dim);
// Set momenta for this event
for (int d = 0; d < dim; ++d) {
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 3; ++j) {
lp[i*dim*3+j*dim+d] = p[d][i][1+j];
}
}
}
//new
int num_bytes = 3*6*dim * sizeof(double);
double *allmomenta = 0;
cudaMalloc((void**)&allmomenta, num_bytes);
cudaMemcpy(allmomenta,lp,num_bytes,cudaMemcpyHostToDevice);
//gpuErrchk3(cudaMemcpy3D(&tdp));
//process.preSigmaKin();
if (perf) {
timer.Start();
}
// Evaluate matrix element
// later process.sigmaKin(ncomb, goodhel, ntry, sum_hel, ngood, igood,
// jhel);
sigmaKin<<<gpublocks, gputhreads>>>(allmomenta, meDevPtr);//, debug, verbose);
gpuErrchk3( cudaPeekAtLastError() );
//gpuErrchk3(cudaMemcpy2D(meHostPtr, sizeof(double), meDevPtr, mePitch,
// sizeof(double), dim, cudaMemcpyDeviceToHost));
cudaMemcpy(meHostPtr, meDevPtr, 1 * dim*sizeof(double), cudaMemcpyDeviceToHost);
if (verbose)
std::cout << "***********************************" << std::endl
<< "Iteration #" << x+1 << " of " << numiter << std::endl;
if (perf) {
float gputime = timer.GetDuration();
wavetimes.push_back(gputime);
if (verbose)
std::cout << "Wave function time: " << gputime << std::endl;
}
if (verbose || perf) {
for (int d = 0; d < dim; ++d) {
if (verbose) {
std::cout << "Momenta:" << std::endl;
for (int i = 0; i < process.nexternal; i++)
std::cout << std::setw(4) << i + 1
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][0] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][1]
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][2] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][3] << std::endl;
std::cout << std::string(80, '-') << std::endl;
}
// Display matrix elements
for (int i = 0; i < process.nprocesses; i++) {
if (verbose)
std::cout << " Matrix element = "
// << setiosflags(ios::fixed) << setprecision(17)
<< meHostPtr[i*1 + d] << " GeV^" << meGeVexponent << std::endl;
if (perf)
matrixelementvector.push_back(meHostPtr[i*1 + d]);
}
if (verbose)
std::cout << std::string(80, '-') << std::endl;
}
} else if (!debug) {
std::cout << ".";
}
for (std::vector<std::vector<double *>>::iterator it = p.begin();
it != p.end(); ++it) {
for (std::vector<double *>::iterator jt = it->begin(); jt != it->end();
++jt) {
delete[] & (**jt);
}
}
cudaFree(allmomenta);
}
if (!(verbose || debug || perf)) {
std::cout << std::endl;
}
if (perf) {
float sum = std::accumulate(wavetimes.begin(), wavetimes.end(), 0.0);
int num_wts = wavetimes.size();
float mean = sum / num_wts;
float sq_sum = std::inner_product(wavetimes.begin(), wavetimes.end(),
wavetimes.begin(), 0.0);
float stdev = std::sqrt(sq_sum / num_wts - mean * mean);
std::vector<float>::iterator mintime =
std::min_element(wavetimes.begin(), wavetimes.end());
std::vector<float>::iterator maxtime =
std::max_element(wavetimes.begin(), wavetimes.end());
int num_mes = matrixelementvector.size();
float sumelem = std::accumulate(matrixelementvector.begin(), matrixelementvector.end(), 0.0);
float meanelem = sumelem / num_mes;
float sqselem = std::inner_product(matrixelementvector.begin(), matrixelementvector.end(),
matrixelementvector.begin(), 0.0);
float stdelem = std::sqrt(sqselem / num_mes - meanelem * meanelem);
std::vector<double>::iterator maxelem = std::max_element(
matrixelementvector.begin(), matrixelementvector.end());
std::vector<double>::iterator minelem = std::min_element(
matrixelementvector.begin(), matrixelementvector.end());
std::cout << "***********************************" << std::endl
<< "NumIterations = " << numiter << std::endl
<< "NumThreadsPerBlock = " << gputhreads << std::endl
<< "NumBlocksPerGrid = " << gpublocks << std::endl
<< "-----------------------------------" << std::endl
<< "NumberOfEntries = " << num_wts << std::endl
<< std::scientific
<< "TotalTimeInWaveFuncs = " << sum << " sec" << std::endl
<< "MeanTimeInWaveFuncs = " << mean << " sec" << std::endl
<< "StdDevTimeInWaveFuncs = " << stdev << " sec" << std::endl
<< "MinTimeInWaveFuncs = " << *mintime << " sec" << std::endl
<< "MaxTimeInWaveFuncs = " << *maxtime << " sec" << std::endl
<< "-----------------------------------" << std::endl
<< "ProcessID: = " << getpid() << std::endl
<< "NProcesses = " << process.nprocesses << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< "MatrixElementsPerSec = " << num_mes/sum << " sec^-1" << std::endl;
std::cout << "***********************************" << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< std::scientific
<< "MeanMatrixElemValue = " << meanelem << " GeV^" << meGeVexponent << std::endl
<< "StdErrMatrixElemValue = " << stdelem/sqrt(num_mes) << " GeV^" << meGeVexponent << std::endl
<< "StdDevMatrixElemValue = " << stdelem << " GeV^" << meGeVexponent << std::endl
<< "MinMatrixElemValue = " << *minelem << " GeV^" << meGeVexponent << std::endl
<< "MaxMatrixElemValue = " << *maxelem << " GeV^" << meGeVexponent << std::endl;
}
delete[] lp;
}
|
42fe1e21506dae79f68a8fbc5b7611329dfe7d71.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <quda_matrix.h>
#include <cassert>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
template<class Cmplx>
struct CloverDerivArg
{
int X[4];
int border[4];
int mu;
int nu;
typename RealTypeId<Cmplx>::Type coeff;
int parity;
int volumeCB;
Cmplx* gauge;
Cmplx* force;
Cmplx* oprod;
int forceStride;
int gaugeStride;
int oprodStride;
int forceOffset;
int gaugeOffset;
int oprodOffset;
bool conjugate;
CloverDerivArg(cudaGaugeField& force, cudaGaugeField& gauge, cudaGaugeField& oprod, int mu, int nu, double coeff, int parity, bool conjugate) :
mu(mu), nu(nu), coeff(coeff), parity(parity), volumeCB(force.VolumeCB()),
force(reinterpret_cast<Cmplx*>(force.Gauge_p())), gauge(reinterpret_cast<Cmplx*>(gauge.Gauge_p())), oprod(reinterpret_cast<Cmplx*>(oprod.Gauge_p())),
forceStride(force.Stride()), gaugeStride(gauge.Stride()), oprodStride(oprod.Stride()),
forceOffset(force.Bytes()/(2*sizeof(Cmplx))), gaugeOffset(gauge.Bytes()/(2*sizeof(Cmplx))), oprodOffset(oprod.Bytes()/(2*sizeof(Cmplx))), conjugate(conjugate)
{
for(int dir=0; dir<4; ++dir) X[dir] = force.X()[dir];
//for(int dir=0; dir<4; ++dir) border[dir] = commDimPartitioned(dir) ? 2 : 0;
for(int dir=0; dir<4; ++dir) border[dir] = 2;
}
};
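/* The two device helpers below handle even/odd (checkerboard) site indexing:
 * getCoords maps a checkerboard index of the given parity to full 4-d lattice
 * coordinates, and linkIndex applies a displacement dx with periodic wrapping
 * in X and returns the resulting checkerboard site index. */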
__device__ void getCoords(int x[4], int cb_index, const int X[4], int parity)
{
x[3] = cb_index/(X[2]*X[1]*X[0]/2);
x[2] = (cb_index/(X[1]*X[0]/2)) % X[2];
x[1] = (cb_index/(X[0]/2)) % X[1];
x[0] = 2*(cb_index%(X[0]/2)) + ((x[3]+x[2]+x[1]+parity)&1);
return;
}
__device__ int linkIndex(const int x[4], const int dx[4], const int X[4])
{
int y[4];
for (int i=0; i<4; i++) y[i] = (x[i] + dx[i] + X[i]) % X[i];
return (((y[3]*X[2] + y[2])*X[1] + y[1])*X[0] + y[0])/2;
}
template<typename Cmplx, bool isConjugate>
__global__ void
cloverDerivativeKernel(const CloverDerivArg<Cmplx> arg)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
if(index >= arg.volumeCB) return;
int x[4];
int y[4];
int otherparity = (1-arg.parity);
getCoords(x, index, arg.X, arg.parity);
getCoords(y, index, arg.X, otherparity);
int X[4];
for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir];
for(int dir=0; dir<4; ++dir){
x[dir] += arg.border[dir];
y[dir] += arg.border[dir];
X[dir] += 2*arg.border[dir];
}
Cmplx* thisGauge = arg.gauge + arg.parity*arg.gaugeOffset;
Cmplx* otherGauge = arg.gauge + (otherparity)*arg.gaugeOffset;
Cmplx* thisOprod = arg.oprod + arg.parity*arg.oprodOffset;
const int& mu = arg.mu;
const int& nu = arg.nu;
Matrix<Cmplx,3> thisForce;
Matrix<Cmplx,3> otherForce;
// U[mu](x) U[nu](x+mu) U[*mu](x+nu) U[*nu](x) Oprod(x)
{
int d[4] = {0, 0, 0, 0};
// load U(x)_(+mu)
Matrix<Cmplx,3> U1;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U1);
// load U(x+mu)_(+nu)
Matrix<Cmplx,3> U2;
d[mu]++;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U2);
d[mu]--;
// load U(x+nu)_(+mu)
Matrix<Cmplx,3> U3;
d[nu]++;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U3);
d[nu]--;
// load U(x)_(+nu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U4);
// load Oprod
Matrix<Cmplx,3> Oprod1;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod1);
if(isConjugate) Oprod1 -= conj(Oprod1);
thisForce = U1*U2*conj(U3)*conj(U4)*Oprod1;
Matrix<Cmplx,3> Oprod2;
d[mu]++; d[nu]++;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod2);
d[mu]--; d[nu]--;
if(isConjugate) Oprod2 -= conj(Oprod2);
thisForce += U1*U2*Oprod2*conj(U3)*conj(U4);
}
{
int d[4] = {0, 0, 0, 0};
// load U(x)_(+mu)
Matrix<Cmplx,3> U1;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U1);
// load U(x+mu)_(+nu)
Matrix<Cmplx,3> U2;
d[mu]++;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U2);
d[mu]--;
// load U(x+nu)_(+mu)
Matrix<Cmplx,3> U3;
d[nu]++;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U3);
d[nu]--;
// load U(x)_(+nu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U4);
// load opposite parity Oprod
Matrix<Cmplx,3> Oprod3;
d[nu]++;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod3);
d[nu]--;
if(isConjugate) Oprod3 -= conj(Oprod3);
otherForce = U1*U2*conj(U3)*Oprod3*conj(U4);
// load Oprod(x+mu)
Matrix<Cmplx, 3> Oprod4;
d[mu]++;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod4);
d[mu]--;
if(isConjugate) Oprod4 -= conj(Oprod4);
otherForce += U1*Oprod4*U2*conj(U3)*conj(U4);
}
// Lower leaf
// U[nu*](x-nu) U[mu](x-nu) U[nu](x+mu-nu) Oprod(x+mu) U[*mu](x)
{
int d[4] = {0, 0, 0, 0};
// load U(x-nu)(+nu)
Matrix<Cmplx,3> U1;
d[nu]--;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U1);
d[nu]++;
// load U(x-nu)(+mu)
Matrix<Cmplx, 3> U2;
d[nu]--;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U2);
d[nu]++;
// load U(x+mu-nu)(nu)
Matrix<Cmplx, 3> U3;
d[mu]++; d[nu]--;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U3);
d[mu]--; d[nu]++;
// load U(x)_(+mu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U4);
// load Oprod(x+mu)
Matrix<Cmplx, 3> Oprod1;
d[mu]++;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod1);
d[mu]--;
if(isConjugate) Oprod1 -= conj(Oprod1);
otherForce -= conj(U1)*U2*U3*Oprod1*conj(U4);
Matrix<Cmplx,3> Oprod2;
d[nu]--;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod2);
d[nu]++;
if(isConjugate) Oprod2 -= conj(Oprod2);
otherForce -= conj(U1)*Oprod2*U2*U3*conj(U4);
}
{
int d[4] = {0, 0, 0, 0};
// load U(x-nu)(+nu)
Matrix<Cmplx,3> U1;
d[nu]--;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U1);
d[nu]++;
// load U(x-nu)(+mu)
Matrix<Cmplx, 3> U2;
d[nu]--;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U2);
d[nu]++;
// load U(x+mu-nu)(nu)
Matrix<Cmplx, 3> U3;
d[mu]++; d[nu]--;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U3);
d[mu]--; d[nu]++;
// load U(x)_(+mu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U4);
Matrix<Cmplx,3> Oprod1;
d[mu]++; d[nu]--;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod1);
d[mu]--; d[nu]++;
if(isConjugate) Oprod1 -= conj(Oprod1);
thisForce -= conj(U1)*U2*Oprod1*U3*conj(U4);
Matrix<Cmplx, 3> Oprod4;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod4);
if(isConjugate) Oprod4 -= conj(Oprod4);
thisForce -= Oprod4*conj(U1)*U2*U3*conj(U4);
}
thisForce *= arg.coeff;
otherForce *= arg.coeff;
// Write to array
{
appendMatrixToArray(thisForce, index, arg.forceStride, arg.force + arg.parity*arg.forceOffset);
appendMatrixToArray(otherForce, index, arg.forceStride, arg.force + otherparity*arg.forceOffset);
}
return;
} // cloverDerivativeKernel
template<typename Complex>
class CloverDerivative : public Tunable {
private:
CloverDerivArg<Complex> arg;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.volumeCB; }
bool tuneGridDim() const { return false; }
public:
CloverDerivative(const CloverDerivArg<Complex> &arg)
: arg(arg) {
sprintf(vol,"%dx%dx%dx%d",arg.X[0],arg.X[1],arg.X[2],arg.X[3]);
sprintf(aux,"threads=%d,prec=%lu,stride=%d,geometery=%d",
arg.volumeCB,sizeof(Complex)/2,arg.forceOffset);
}
virtual ~CloverDerivative() {}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(arg.conjugate){
hipLaunchKernelGGL(( cloverDerivativeKernel<Complex,true>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
}else{
hipLaunchKernelGGL(( cloverDerivativeKernel<Complex,false>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
}
} // apply
void preTune(){}
void postTune(){}
long long flops() const {
return 0;
}
long long bytes() const { return 0; }
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
};
template<typename Float>
void cloverDerivative(cudaGaugeField &out,
cudaGaugeField& gauge,
cudaGaugeField& oprod,
int mu, int nu, double coeff, int parity,
int conjugate)
{
typedef typename ComplexTypeId<Float>::Type Complex;
CloverDerivArg<Complex> arg(out, gauge, oprod, mu, nu, coeff, parity, conjugate);
// CloverDerivative<Complex> cloverDerivative(arg);
// cloverDerivative.apply(0);
dim3 blockDim(128, 1, 1);
dim3 gridDim((arg.volumeCB + blockDim.x-1)/blockDim.x, 1, 1);
if(conjugate){
hipLaunchKernelGGL(( cloverDerivativeKernel<Complex,true>), dim3(gridDim),dim3(blockDim),0, 0, arg);
}else{
hipLaunchKernelGGL(( cloverDerivativeKernel<Complex,false>), dim3(gridDim),dim3(blockDim),0, 0, arg);
}
}
#endif
void cloverDerivative(cudaGaugeField &out,
cudaGaugeField& gauge,
cudaGaugeField& oprod,
int mu, int nu, double coeff, QudaParity parity, int conjugate)
{
#ifdef GPU_CLOVER_DIRAC
assert(oprod.Geometry() == QUDA_SCALAR_GEOMETRY);
assert(out.Geometry() == QUDA_SCALAR_GEOMETRY);
int device_parity = (parity == QUDA_EVEN_PARITY) ? 0 : 1;
if(out.Precision() == QUDA_DOUBLE_PRECISION){
cloverDerivative<double>(out, gauge, oprod, mu, nu, coeff, device_parity, conjugate);
} else if (out.Precision() == QUDA_SINGLE_PRECISION){
cloverDerivative<float>(out, gauge, oprod, mu, nu, coeff, device_parity, conjugate);
} else {
errorQuda("Precision %d not supported", out.Precision());
}
return;
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
|
42fe1e21506dae79f68a8fbc5b7611329dfe7d71.cu
|
#include <cstdio>
#include <cstdlib>
#include <cuda.h>
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <quda_matrix.h>
#include <cassert>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
template<class Cmplx>
struct CloverDerivArg
{
int X[4];
int border[4];
int mu;
int nu;
typename RealTypeId<Cmplx>::Type coeff;
int parity;
int volumeCB;
Cmplx* gauge;
Cmplx* force;
Cmplx* oprod;
int forceStride;
int gaugeStride;
int oprodStride;
int forceOffset;
int gaugeOffset;
int oprodOffset;
bool conjugate;
CloverDerivArg(cudaGaugeField& force, cudaGaugeField& gauge, cudaGaugeField& oprod, int mu, int nu, double coeff, int parity, bool conjugate) :
mu(mu), nu(nu), coeff(coeff), parity(parity), volumeCB(force.VolumeCB()),
force(reinterpret_cast<Cmplx*>(force.Gauge_p())), gauge(reinterpret_cast<Cmplx*>(gauge.Gauge_p())), oprod(reinterpret_cast<Cmplx*>(oprod.Gauge_p())),
forceStride(force.Stride()), gaugeStride(gauge.Stride()), oprodStride(oprod.Stride()),
forceOffset(force.Bytes()/(2*sizeof(Cmplx))), gaugeOffset(gauge.Bytes()/(2*sizeof(Cmplx))), oprodOffset(oprod.Bytes()/(2*sizeof(Cmplx))), conjugate(conjugate)
{
for(int dir=0; dir<4; ++dir) X[dir] = force.X()[dir];
//for(int dir=0; dir<4; ++dir) border[dir] = commDimPartitioned(dir) ? 2 : 0;
for(int dir=0; dir<4; ++dir) border[dir] = 2;
}
};
__device__ void getCoords(int x[4], int cb_index, const int X[4], int parity)
{
x[3] = cb_index/(X[2]*X[1]*X[0]/2);
x[2] = (cb_index/(X[1]*X[0]/2)) % X[2];
x[1] = (cb_index/(X[0]/2)) % X[1];
x[0] = 2*(cb_index%(X[0]/2)) + ((x[3]+x[2]+x[1]+parity)&1);
return;
}
__device__ int linkIndex(const int x[4], const int dx[4], const int X[4])
{
int y[4];
for (int i=0; i<4; i++) y[i] = (x[i] + dx[i] + X[i]) % X[i];
return (((y[3]*X[2] + y[2])*X[1] + y[1])*X[0] + y[0])/2;
}
template<typename Cmplx, bool isConjugate>
__global__ void
cloverDerivativeKernel(const CloverDerivArg<Cmplx> arg)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
if(index >= arg.volumeCB) return;
int x[4];
int y[4];
int otherparity = (1-arg.parity);
getCoords(x, index, arg.X, arg.parity);
getCoords(y, index, arg.X, otherparity);
int X[4];
for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir];
for(int dir=0; dir<4; ++dir){
x[dir] += arg.border[dir];
y[dir] += arg.border[dir];
X[dir] += 2*arg.border[dir];
}
Cmplx* thisGauge = arg.gauge + arg.parity*arg.gaugeOffset;
Cmplx* otherGauge = arg.gauge + (otherparity)*arg.gaugeOffset;
Cmplx* thisOprod = arg.oprod + arg.parity*arg.oprodOffset;
const int& mu = arg.mu;
const int& nu = arg.nu;
Matrix<Cmplx,3> thisForce;
Matrix<Cmplx,3> otherForce;
// U[mu](x) U[nu](x+mu) U[*mu](x+nu) U[*nu](x) Oprod(x)
{
int d[4] = {0, 0, 0, 0};
// load U(x)_(+mu)
Matrix<Cmplx,3> U1;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U1);
// load U(x+mu)_(+nu)
Matrix<Cmplx,3> U2;
d[mu]++;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U2);
d[mu]--;
// load U(x+nu)_(+mu)
Matrix<Cmplx,3> U3;
d[nu]++;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U3);
d[nu]--;
// load U(x)_(+nu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U4);
// load Oprod
Matrix<Cmplx,3> Oprod1;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod1);
if(isConjugate) Oprod1 -= conj(Oprod1);
thisForce = U1*U2*conj(U3)*conj(U4)*Oprod1;
Matrix<Cmplx,3> Oprod2;
d[mu]++; d[nu]++;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod2);
d[mu]--; d[nu]--;
if(isConjugate) Oprod2 -= conj(Oprod2);
thisForce += U1*U2*Oprod2*conj(U3)*conj(U4);
}
{
int d[4] = {0, 0, 0, 0};
// load U(x)_(+mu)
Matrix<Cmplx,3> U1;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U1);
// load U(x+mu)_(+nu)
Matrix<Cmplx,3> U2;
d[mu]++;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U2);
d[mu]--;
// load U(x+nu)_(+mu)
Matrix<Cmplx,3> U3;
d[nu]++;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U3);
d[nu]--;
// load U(x)_(+nu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U4);
// load opposite parity Oprod
Matrix<Cmplx,3> Oprod3;
d[nu]++;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod3);
d[nu]--;
if(isConjugate) Oprod3 -= conj(Oprod3);
otherForce = U1*U2*conj(U3)*Oprod3*conj(U4);
// load Oprod(x+mu)
Matrix<Cmplx, 3> Oprod4;
d[mu]++;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod4);
d[mu]--;
if(isConjugate) Oprod4 -= conj(Oprod4);
otherForce += U1*Oprod4*U2*conj(U3)*conj(U4);
}
// Lower leaf
// U[nu*](x-nu) U[mu](x-nu) U[nu](x+mu-nu) Oprod(x+mu) U[*mu](x)
{
int d[4] = {0, 0, 0, 0};
// load U(x-nu)(+nu)
Matrix<Cmplx,3> U1;
d[nu]--;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U1);
d[nu]++;
// load U(x-nu)(+mu)
Matrix<Cmplx, 3> U2;
d[nu]--;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U2);
d[nu]++;
// load U(x+mu-nu)(nu)
Matrix<Cmplx, 3> U3;
d[mu]++; d[nu]--;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(y, d, X),
arg.gaugeStride, &U3);
d[mu]--; d[nu]++;
// load U(x)_(+mu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(y, d, X),
arg.gaugeStride, &U4);
// load Oprod(x+mu)
Matrix<Cmplx, 3> Oprod1;
d[mu]++;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod1);
d[mu]--;
if(isConjugate) Oprod1 -= conj(Oprod1);
otherForce -= conj(U1)*U2*U3*Oprod1*conj(U4);
Matrix<Cmplx,3> Oprod2;
d[nu]--;
loadMatrixFromArray(thisOprod, linkIndex(y, d, X), arg.oprodStride, &Oprod2);
d[nu]++;
if(isConjugate) Oprod2 -= conj(Oprod2);
otherForce -= conj(U1)*Oprod2*U2*U3*conj(U4);
}
{
int d[4] = {0, 0, 0, 0};
// load U(x-nu)(+nu)
Matrix<Cmplx,3> U1;
d[nu]--;
loadLinkVariableFromArray(otherGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U1);
d[nu]++;
// load U(x-nu)(+mu)
Matrix<Cmplx, 3> U2;
d[nu]--;
loadLinkVariableFromArray(otherGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U2);
d[nu]++;
// load U(x+mu-nu)(nu)
Matrix<Cmplx, 3> U3;
d[mu]++; d[nu]--;
loadLinkVariableFromArray(thisGauge, nu, linkIndex(x, d, X),
arg.gaugeStride, &U3);
d[mu]--; d[nu]++;
// load U(x)_(+mu)
Matrix<Cmplx,3> U4;
loadLinkVariableFromArray(thisGauge, mu, linkIndex(x, d, X),
arg.gaugeStride, &U4);
Matrix<Cmplx,3> Oprod1;
d[mu]++; d[nu]--;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod1);
d[mu]--; d[nu]++;
if(isConjugate) Oprod1 -= conj(Oprod1);
thisForce -= conj(U1)*U2*Oprod1*U3*conj(U4);
Matrix<Cmplx, 3> Oprod4;
loadMatrixFromArray(thisOprod, linkIndex(x, d, X), arg.oprodStride, &Oprod4);
if(isConjugate) Oprod4 -= conj(Oprod4);
thisForce -= Oprod4*conj(U1)*U2*U3*conj(U4);
}
thisForce *= arg.coeff;
otherForce *= arg.coeff;
// Write to array
{
appendMatrixToArray(thisForce, index, arg.forceStride, arg.force + arg.parity*arg.forceOffset);
appendMatrixToArray(otherForce, index, arg.forceStride, arg.force + otherparity*arg.forceOffset);
}
return;
} // cloverDerivativeKernel
template<typename Complex>
class CloverDerivative : public Tunable {
private:
CloverDerivArg<Complex> arg;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.volumeCB; }
bool tuneGridDim() const { return false; }
public:
CloverDerivative(const CloverDerivArg<Complex> &arg)
: arg(arg) {
sprintf(vol,"%dx%dx%dx%d",arg.X[0],arg.X[1],arg.X[2],arg.X[3]);
sprintf(aux,"threads=%d,prec=%lu,stride=%d,geometery=%d",
arg.volumeCB,sizeof(Complex)/2,arg.forceOffset);
}
virtual ~CloverDerivative() {}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(arg.conjugate){
cloverDerivativeKernel<Complex,true><<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
}else{
cloverDerivativeKernel<Complex,false><<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
}
} // apply
void preTune(){}
void postTune(){}
long long flops() const {
return 0;
}
long long bytes() const { return 0; }
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
};
template<typename Float>
void cloverDerivative(cudaGaugeField &out,
cudaGaugeField& gauge,
cudaGaugeField& oprod,
int mu, int nu, double coeff, int parity,
int conjugate)
{
typedef typename ComplexTypeId<Float>::Type Complex;
CloverDerivArg<Complex> arg(out, gauge, oprod, mu, nu, coeff, parity, conjugate);
// CloverDerivative<Complex> cloverDerivative(arg);
// cloverDerivative.apply(0);
dim3 blockDim(128, 1, 1);
dim3 gridDim((arg.volumeCB + blockDim.x-1)/blockDim.x, 1, 1);
if(conjugate){
cloverDerivativeKernel<Complex,true><<<gridDim,blockDim,0>>>(arg);
}else{
cloverDerivativeKernel<Complex,false><<<gridDim,blockDim,0>>>(arg);
}
}
#endif
void cloverDerivative(cudaGaugeField &out,
cudaGaugeField& gauge,
cudaGaugeField& oprod,
int mu, int nu, double coeff, QudaParity parity, int conjugate)
{
#ifdef GPU_CLOVER_DIRAC
assert(oprod.Geometry() == QUDA_SCALAR_GEOMETRY);
assert(out.Geometry() == QUDA_SCALAR_GEOMETRY);
int device_parity = (parity == QUDA_EVEN_PARITY) ? 0 : 1;
if(out.Precision() == QUDA_DOUBLE_PRECISION){
cloverDerivative<double>(out, gauge, oprod, mu, nu, coeff, device_parity, conjugate);
} else if (out.Precision() == QUDA_SINGLE_PRECISION){
cloverDerivative<float>(out, gauge, oprod, mu, nu, coeff, device_parity, conjugate);
} else {
errorQuda("Precision %d not supported", out.Precision());
}
return;
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
|
c49bbc5b3448d2b2335fc40d58056e6dbbff5d33.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h>
__global__ void initialConditions(double* vars, int num_param, int num_cells, int cells_per_thread) {
double V = -83.5092;
double m = 0.0025 ;
double h = 0.6945 ;
double j = 0.6924 ;
double d = 4.2418e-005 ;
double f = 0.9697 ;
double f2 = 0.9784 ;
double fCass = 0.9999 ;
double r = 3.2195e-008 ;
double s = 1.0000 ;
double xs = 0.0038 ;
double xr1 = 3.1298e-004 ;
double xr2 = 0.4534 ;
double Rbar_ryr = 0.9816 ;
double Cai = 0.04 ;
double Cass = 0.2381 ;
double CaSR = 3.6426e+003 ;
double Nai = 3.8067e+003 ;
double Ki = 1.2369e+005 ;
// Within each test, the variables are divided as follows
// V(cell1), V(cell2), V(cell3) ... V(cellLast), m(cell1), m(cell2), ... m(cellLast) ... for all 19 parameters
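// e.g. parameter p of cell c in simulation s lives at vars[s*num_param*num_cells + p*num_cells + c]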
int idx = threadIdx.x*cells_per_thread;
int simulations = blockIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
vars[(simulations*num_param*num_cells) + idx +(0*num_cells)] = V;
vars[(simulations*num_param*num_cells) + idx +(1*num_cells)] = m;
vars[(simulations*num_param*num_cells) + idx +(2*num_cells)] = h;
vars[(simulations*num_param*num_cells) + idx +(3*num_cells)] = j;
vars[(simulations*num_param*num_cells) + idx +(4*num_cells)] = d;
vars[(simulations*num_param*num_cells) + idx +(5*num_cells)] = f;
vars[(simulations*num_param*num_cells) + idx +(6*num_cells)] = f2;
vars[(simulations*num_param*num_cells) + idx +(7*num_cells)] = fCass;
vars[(simulations*num_param*num_cells) + idx +(8*num_cells)] = r;
vars[(simulations*num_param*num_cells) + idx +(9*num_cells)] = s;
vars[(simulations*num_param*num_cells) + idx +(10*num_cells)] = xs;
vars[(simulations*num_param*num_cells) + idx +(11*num_cells)] = xr1;
vars[(simulations*num_param*num_cells) + idx +(12*num_cells)] = xr2;
vars[(simulations*num_param*num_cells) + idx +(13*num_cells)] = Rbar_ryr;
vars[(simulations*num_param*num_cells) + idx +(14*num_cells)] = Cai;
vars[(simulations*num_param*num_cells) + idx +(15*num_cells)] = Cass;
vars[(simulations*num_param*num_cells) + idx +(16*num_cells)] = CaSR;
vars[(simulations*num_param*num_cells) + idx +(17*num_cells)] = Nai;
vars[(simulations*num_param*num_cells) + idx +(18*num_cells)] = Ki;
}
}
__global__ void computeState(double* x, double* ion_current, int total_cells, double step, double* randNums, int variations, double* x_temp, int num_changing_vars, int cells_per_thread) {
int idx = cells_per_thread*threadIdx.x;
int cell_num;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
cell_num = (blockIdx.x*total_cells*19) + idx;
//Index Variables to make life easier
//Array is categorized by blocks of size=total_cells, each block contains the values of one parameter across the cells
int V_i = 0*total_cells ;
int m_i = 1*total_cells ;
int h_i = 2*total_cells ;
int j_i = 3*total_cells ;
int d_i = 4*total_cells ;
int f_i = 5*total_cells ;
int f2_i = 6*total_cells ;
int fCass_i = 7*total_cells ;
int r_i = 8*total_cells ;
int s_i = 9*total_cells ;
int xs_i = 10*total_cells ;
int xr1_i = 11*total_cells ;
int xr2_i = 12*total_cells ;
int Rbar_ryr_i = 13*total_cells ;
int Cai_i = 14*total_cells ;
int Cass_i = 15*total_cells ;
int CaSR_i = 16*total_cells ;
int Nai_i = 17*total_cells ;
int Ki_i = 18*total_cells ;
double ENa, EK, ECa, EKs, INa, ICa, Ito, IKs, IKr;
double aK1, bK1, xK1, IK1;
double minf, am, bm, taum;
double hinf, jinf, ad, bd, gd, taud, finf, af, bf, gf, tauf;
double f2inf, af2, bf2, gf2, tauf2, fCassinf, taufCass;
double rinf, taur, sinf, taus, xsinf, axs, bxs, tauxs;
double axr1, bxr1, tauxr1, xr1inf, xr2inf, axr2, bxr2, tauxr2;
double Ileak, Iup, kcasr, k1_ryr, k2_ryr, O_ryr;
double Irel, Ixfer, Bi, Bss, BSR;
double ah, bh, aj, bj;
double tauh, tauj, dinf, INCX, fNaK, INaK, IpCa, IpK, INab, ICab, Iion;
char celltype;
double F = 96.4853415; // Faraday's constant, coulombs/mmol
double R = 8.314472; //gas constant, J/(K mol)
double T = 310.0; // absolute temperature, K
double RTF = R*T/F;
double Acap = 5.6297*3.280e-5; // cm2
double Vmyo = 16.404; // pL
double VSR = 1.094; // pL
double Vss = 0.05468 ; // pL
double Ko = 5400 ; // uM
double Nao = 140000 ; // uM
double Cao = 2000 ; // uM
double PCa_;
double GNa_;
double GK1_;
double GKr_;
double GpK_;
double GpCa_;
double GNab_;
double GCab_;
double Gto_;
double GKs_;
double INaK_;
double kNaCa;
double Vleak;
double Iup_;
double Vrel;
double pKNa = 0.03 ; // relative permeability, Na to K
double KmNa = 87500 ; // uM
double KmCa = 1380 ; // uM
double ksat = 0.1 ; // unitless
double alpha_ncx = 2.5 ; // unitless
double eta = 0.35 ; // unitless, actually gamma in paper
double KmNai = 40000 ; // Na-K pump // uM
double KmKo = 1000 ; // Sarcolemmal Ca pump // uM
double KpCa = 0.5 ; // SERCA // uM
double Kmup = 0.25 ; // uM
double Vxfer = 0.0038 ; // ms^-1
double k1_ryr_prime = 0.15e-6; // uM-2 ms-1
double k2_ryr_prime = 0.045e-3 ; // uM-1 ms-1
double k3_ryr = 0.06 ; // ms-1
double k4_ryr = 0.005 ; // ms-1 as per KWTT source code
double maxsr = 2.5 ; // dimensionless
double minsr = 1 ; // dimensionless
double EC_ryr = 1500 ; // uM
double Bufc = 200 ; // uM
double Kbufc = 1 ; // uM
double Bufss = 400 ; // uM
double Kbufss = 0.25 ; // uM
double BufSR = 10000 ; // uM
double KbufSR = 300 ; // uM
celltype = 'p' ; //epi
//celltype = 'n' ; //endo
//celltype = 'm' ; //mid
if (num_changing_vars==0) {
GNa_ = 14.838; // nS/pF
GK1_ = 5.405; // nS/pF
GKr_ = 0.153; // nS/pF
GpK_ = 0.0146; // nS/pF
GpCa_ = 0.1238; // nS/pF
GNab_ = 2.9e-4; // nS/pF
GCab_ = 5.92e-4; // nS/pF
if (celltype == 'n') { //endo
Gto_ = 0.073 ;
GKs_ = 0.392 ;
}
else if (celltype == 'p') { //epi
Gto_ = 0.294 ;
GKs_ = 0.392 ;
}
else if (celltype == 'm') { //mid
Gto_ = 0.294; // nS/pF
GKs_ = 0.098; // nS/pF
}
PCa_ = 3.980e-5;
INaK_ = 2.724; // pA/pF
kNaCa = 1000 ; // pA/pF
Vleak = 3.6e-4 ; // ms^-1
Iup_ = 6.375 ; // uM/ms
Vrel = 0.102 ; // ms^-1 as per KWTT source code
}
else {
GNa_ = 14.838*randNums[(blockIdx.x*num_changing_vars)+0];
GK1_ = 5.405*randNums[(blockIdx.x*num_changing_vars)+1];
GKr_ = 0.153*randNums[(blockIdx.x*num_changing_vars)+2];
GpK_ = 0.0146*randNums[(blockIdx.x*num_changing_vars)+3];
GpCa_ = 0.1238*randNums[(blockIdx.x*num_changing_vars)+4];
GNab_ = 2.9e-4*randNums[(blockIdx.x*num_changing_vars)+5];
GCab_ = 5.92e-4*randNums[(blockIdx.x*num_changing_vars)+6];
if (celltype == 'n') { //endo
Gto_ = 0.073*randNums[(blockIdx.x*num_changing_vars)+7];
GKs_ = 0.392*randNums[(blockIdx.x*num_changing_vars)+8];
}
else if (celltype == 'p') { //epi
Gto_ = 0.294*randNums[(blockIdx.x*num_changing_vars)+7];
GKs_ = 0.392*randNums[(blockIdx.x*num_changing_vars)+8];
}
else if (celltype == 'm') { //mid
Gto_ = 0.294*randNums[(blockIdx.x*num_changing_vars)+7];
GKs_ = 0.098*randNums[(blockIdx.x*num_changing_vars)+8];
}
PCa_ = 3.980e-5*randNums[(blockIdx.x*num_changing_vars)+9];
INaK_ = 2.724*randNums[(blockIdx.x*num_changing_vars)+10];
kNaCa = 1000*randNums[(blockIdx.x*num_changing_vars)+11];
Vleak = 3.6e-4*randNums[(blockIdx.x*num_changing_vars)+12];
Iup_ = 6.375*randNums[(blockIdx.x*num_changing_vars)+13] ;
Vrel = 0.102*randNums[(blockIdx.x*num_changing_vars)+14] ;
}
ENa = RTF*log(Nao/x[cell_num+Nai_i]) ;
EK = RTF*log(Ko/x[cell_num+Ki_i]) ;
ECa = 0.5*RTF*log(Cao/x[cell_num+Cai_i]) ;
EKs = RTF*log((Ko + pKNa*Nao)/(x[cell_num+Ki_i] + pKNa*x[cell_num+Nai_i])) ;
INa = GNa_*pow(x[cell_num+m_i],3)*x[cell_num+h_i]*x[cell_num+j_i]*(x[cell_num+V_i] - ENa) ;
ICa = PCa_*x[cell_num+d_i]*x[cell_num+f_i]*x[cell_num+f2_i]*x[cell_num+fCass_i]*4*F/RTF*(x[cell_num+V_i]-15)*(0.25*x[cell_num+Cass_i]*exp(2*(x[cell_num+V_i]-15)/RTF) - Cao)/ (exp(2*(x[cell_num+V_i]-15)/RTF) - 1) ;
Ito = Gto_*x[cell_num+r_i]*x[cell_num+s_i]*(x[cell_num+V_i]-EK) ;
IKs = GKs_*pow(x[cell_num+xs_i],2)*(x[cell_num+V_i]-EKs) ;
IKr = GKr_*sqrt(Ko/5400)*x[cell_num+xr1_i]*x[cell_num+xr2_i]*(x[cell_num+V_i]-EK) ;
aK1 = 0.1/(exp(0.06*(x[cell_num+V_i]-EK-200)) + 1) ;
bK1 = ( 3*exp(2e-4*(x[cell_num+V_i]-EK+100)) + exp(0.1*(x[cell_num+V_i]-EK-10)) )/ (exp(-0.5*(x[cell_num+V_i]-EK)) + 1) ;
xK1 = aK1/(aK1+bK1) ;
IK1 = GK1_*sqrt(Ko/5400)*xK1*(x[cell_num+V_i]-EK) ;
INCX = kNaCa*pow((pow(KmNa,3) + pow(Nao,3)),-1)*pow((KmCa + Cao),-1)*pow((ksat*exp((eta-1)*x[cell_num+V_i]/RTF) + 1),-1)* (exp(eta*x[cell_num+V_i]/RTF)*pow(x[cell_num+Nai_i],3)*Cao - exp((eta-1)*x[cell_num+V_i]/RTF)*pow(Nao,3)*x[cell_num+Cai_i]*alpha_ncx) ;
fNaK = 1/(1 + 0.1245*exp(-0.1*x[cell_num+V_i]/RTF) + 0.0353*exp(-x[cell_num+V_i]/RTF)) ;
INaK = INaK_*Ko*x[cell_num+Nai_i]*fNaK/( (Ko + KmKo)*(x[cell_num+Nai_i] + KmNai) ) ;
IpCa = GpCa_*x[cell_num+Cai_i]/(x[cell_num+Cai_i] + KpCa) ;
IpK = GpK_*(x[cell_num+V_i]-EK)/(exp(-(x[cell_num+V_i]-25)/5.98) + 1) ;
INab = GNab_*(x[cell_num+V_i]-ENa) ;
ICab = GCab_*(x[cell_num+V_i]-ECa) ;
Iion = INa + ICa + Ito + IKs + IKr + IK1 + INCX + INaK + IpCa + IpK + INab + ICab ;
minf = 1/pow((exp((-56.86-x[cell_num+V_i])/9.03) + 1),2) ;
am = 1/(exp((-60-x[cell_num+V_i])/5) + 1) ;
bm = 0.1/(exp((x[cell_num+V_i]+35)/5) + 1) + 0.1/(exp((x[cell_num+V_i]-50)/200) + 1) ;
taum = am*bm ;
hinf = ( 1/pow((exp((x[cell_num+V_i]+71.55)/7.43) + 1),2)) ;
jinf = ( 1/pow((exp((x[cell_num+V_i]+71.55)/7.43) + 1),2)) ;
if (x[cell_num+V_i] >= -40) {
ah = 0 ;
bh = 0.77/(0.13*(exp((-x[cell_num+V_i]-10.66)/11.1) + 1)) ;
aj = 0 ;
bj = 0.6*exp(0.057*x[cell_num+V_i])/(exp(-0.1*(x[cell_num+V_i]+32)) + 1) ;
}
else {
ah = 0.057*exp((-x[cell_num+V_i]-80)/6.8) ;
bh = 2.7*exp(0.079*x[cell_num+V_i])+3.1e5*exp(0.3485*x[cell_num+V_i]) ;
aj = (-2.5428e4*exp(0.2444*x[cell_num+V_i])-6.948e-6*exp(-0.04391*x[cell_num+V_i]))*(x[cell_num+V_i]+37.78)/ (exp(0.311*(x[cell_num+V_i]+79.23))+1) ;
bj = 0.02424*exp(-0.01052*x[cell_num+V_i])/(exp(-0.1378*(x[cell_num+V_i]+40.14)) + 1);
}
tauh = 1/(ah+bh) ;
tauj = 1/(aj+bj) ;
dinf = 1/(exp(-(x[cell_num+V_i]+8)/7.5) + 1) ;
ad = 1.4/(exp(-(x[cell_num+V_i]+35)/13) + 1) + 0.25 ;
bd = 1.4/(exp((x[cell_num+V_i]+5)/5) + 1) ;
gd = 1/(exp((50-x[cell_num+V_i])/20) + 1) ;
taud = (ad*bd + gd) ;
finf = 1/(exp((x[cell_num+V_i]+20)/7) + 1) ;
af = 1102.5*exp(-pow((x[cell_num+V_i]+27),2)/225) ;
bf = 200/(1 + exp((13-x[cell_num+V_i])/10)) ;
gf = 180/(1 + exp((x[cell_num+V_i]+30)/10)) + 20 ;
tauf = (af + bf + gf) ;
f2inf = 0.67/(exp((x[cell_num+V_i]+35)/7) + 1) + 0.33 ;
af2 = 600*exp(-pow((x[cell_num+V_i]+25),2)/170) ;
bf2 = 31/(1 + exp((25-x[cell_num+V_i])/10)) ;
gf2 = 16/(1 + exp((x[cell_num+V_i]+30)/10)) ;
tauf2 = (af2 + bf2 + gf2) ;
fCassinf = 0.6/(1 + pow((x[cell_num+Cass_i]/50),2)) + 0.4 ;
taufCass = 80/(1 + pow((x[cell_num+Cass_i]/50),2)) + 2 ;
rinf = 1/(exp((20-x[cell_num+V_i])/6) + 1) ;
taur = (9.5*exp(-pow((x[cell_num+V_i]+40),2)/1800) + 0.8) ;
sinf = 1/(exp((x[cell_num+V_i]+20)/5) + 1) ;
taus = (85*exp(-pow((x[cell_num+V_i]+45),2)/320) + 5/(exp((x[cell_num+V_i]-20)/5) + 1) + 3) ;
xsinf = 1/(exp(-(x[cell_num+V_i]+5)/14) + 1) ;
axs = 1400/sqrt (exp(-(x[cell_num+V_i]-5)/6) + 1) ;
bxs = 1/(exp((x[cell_num+V_i]-35)/15) + 1) ;
tauxs = (axs*bxs + 80) ;
xr1inf = 1/(exp(-(x[cell_num+V_i]+26)/7) + 1) ;
axr1 = 450/(exp(-(x[cell_num+V_i]+45)/10) + 1) ;
bxr1 = 6/(exp((x[cell_num+V_i]+30)/11.5) + 1) ;
tauxr1 = (axr1*bxr1) ;
xr2inf = 1/(exp((x[cell_num+V_i]+88)/24) + 1) ;
axr2 = 3/(exp(-(x[cell_num+V_i]+60)/20) + 1) ;
bxr2 = 1.12/(exp((x[cell_num+V_i]-60)/20) + 1) ;
tauxr2 = (axr2*bxr2) ;
Ileak = Vleak*(x[cell_num+CaSR_i] - x[cell_num+Cai_i]) ;
Iup = Iup_/(pow((Kmup/x[cell_num+Cai_i]),2) + 1) ;
kcasr = maxsr - (maxsr - minsr)/(1 + pow((EC_ryr/x[cell_num+CaSR_i]),2)) ;
k1_ryr = k1_ryr_prime/kcasr ;
k2_ryr = k2_ryr_prime*kcasr ;
O_ryr = k1_ryr*pow(x[cell_num+Cass_i],2)*x[cell_num+Rbar_ryr_i]/(k3_ryr + k1_ryr*pow(x[cell_num+Cass_i],2)) ;
Irel = Vrel*O_ryr*(x[cell_num+CaSR_i] - x[cell_num+Cass_i]) ;
Ixfer = Vxfer*(x[cell_num+Cass_i] - x[cell_num+Cai_i]) ;
Bi = pow((1 + Bufc*Kbufc/pow((Kbufc + x[cell_num+Cai_i]),2)),-1) ;
Bss = pow((1 + Bufss*Kbufss/pow((Kbufss + x[cell_num+Cass_i]),2)),-1) ;
BSR = pow((1 + BufSR*KbufSR/pow((KbufSR + x[cell_num+CaSR_i]),2)),-1) ;
ion_current[cell_num] = Iion;
//new states into temp array
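// forward-Euler step for each gating/concentration variable; if the proposed value is NaN/Inf the previous value is kept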
if ( !isinf(x[cell_num+m_i] + step*((minf-x[cell_num+m_i])/taum)) && !isnan(x[cell_num+m_i] + step*((minf-x[cell_num+m_i])/taum))) {
x_temp[cell_num+m_i] = x[cell_num+m_i] + step*((minf-x[cell_num+m_i])/taum) ;
}
else { x_temp[cell_num+m_i] = x[cell_num+m_i]; }
if ( !isinf(x[cell_num+h_i] + step*( (hinf-x[cell_num+h_i])/tauh)) && !isnan(x[cell_num+h_i] + step*( (hinf-x[cell_num+h_i])/tauh))) {
x_temp[cell_num+h_i] =x[cell_num+h_i] + step*( (hinf-x[cell_num+h_i])/tauh) ;
}
else { x_temp[cell_num+h_i] =x[cell_num+h_i]; }
if ( !isinf( x[cell_num+j_i] + step*( (jinf-x[cell_num+j_i])/tauj)) && !isnan( x[cell_num+j_i] + step*( (jinf-x[cell_num+j_i])/tauj))) {
x_temp[cell_num+j_i] = x[cell_num+j_i] + step*( (jinf-x[cell_num+j_i])/tauj) ;
}
else { x_temp[cell_num+j_i] = x[cell_num+j_i]; }
if ( !isinf(x[cell_num+d_i] + step*( (dinf-x[cell_num+d_i])/taud)) && !isnan(x[cell_num+d_i] + step*( (dinf-x[cell_num+d_i])/taud))) {
x_temp[cell_num+d_i] = x[cell_num+d_i] + step*( (dinf-x[cell_num+d_i])/taud) ;
}
else { x_temp[cell_num+d_i] = x[cell_num+d_i]; }
if ( !isinf(x[cell_num+f_i] + step*((finf-x[cell_num+f_i])/tauf)) && !isnan(x[cell_num+f_i] + step*((finf-x[cell_num+f_i])/tauf))) {
x_temp[cell_num+f_i] = x[cell_num+f_i] + step*((finf-x[cell_num+f_i])/tauf) ;
}
else { x_temp[cell_num+f_i] = x[cell_num+f_i]; }
if ( !isinf(x[cell_num+f2_i] + step*( (f2inf-x[cell_num+f2_i])/tauf2)) && !isnan(x[cell_num+f2_i] + step*( (f2inf-x[cell_num+f2_i])/tauf2))) {
x_temp[cell_num+f2_i] = x[cell_num+f2_i] + step*( (f2inf-x[cell_num+f2_i])/tauf2) ;
}
else { x_temp[cell_num+f2_i] = x[cell_num+f2_i]; }
if ( !isinf(x[cell_num+fCass_i] + step*( (fCassinf-x[cell_num+fCass_i])/taufCass)) && !isnan(x[cell_num+fCass_i] + step*( (fCassinf-x[cell_num+fCass_i])/taufCass))) {
x_temp[cell_num+fCass_i] = x[cell_num+fCass_i] + step*( (fCassinf-x[cell_num+fCass_i])/taufCass) ;
}
else { x_temp[cell_num+fCass_i] = x[cell_num+fCass_i]; }
if ( !isinf(x[cell_num+r_i] + step*((rinf-x[cell_num+r_i])/taur)) && !isnan(x[cell_num+r_i] + step*((rinf-x[cell_num+r_i])/taur))) {
x_temp[cell_num+r_i] = x[cell_num+r_i] + step*((rinf-x[cell_num+r_i])/taur) ;
}
else { x_temp[cell_num+r_i] = x[cell_num+r_i]; }
if ( !isinf(x[cell_num+s_i] + step*((sinf-x[cell_num+s_i])/taus)) && !isnan(x[cell_num+s_i] + step*((sinf-x[cell_num+s_i])/taus))) {
x_temp[cell_num+s_i] = x[cell_num+s_i] + step*((sinf-x[cell_num+s_i])/taus) ;
}
else { x_temp[cell_num+s_i] = x[cell_num+s_i]; }
if ( !isinf(x[cell_num+xs_i] + step*((xsinf-x[cell_num+xs_i])/tauxs)) && !isnan(x[cell_num+xs_i] + step*((xsinf-x[cell_num+xs_i])/tauxs))) {
x_temp[cell_num+xs_i] = x[cell_num+xs_i] + step*((xsinf-x[cell_num+xs_i])/tauxs) ;
}
else { x_temp[cell_num+xs_i] = x[cell_num+xs_i]; }
if ( !isinf(x[cell_num+xr1_i] + step*( (xr1inf-x[cell_num+xr1_i])/tauxr1)) && !isnan(x[cell_num+xr1_i] + step*( (xr1inf-x[cell_num+xr1_i])/tauxr1))) {
x_temp[cell_num+xr1_i] = x[cell_num+xr1_i] + step*( (xr1inf-x[cell_num+xr1_i])/tauxr1) ;
}
else { x_temp[cell_num+xr1_i] = x[cell_num+xr1_i]; }
if ( !isinf(x[cell_num+xr2_i] + step*( (xr2inf-x[cell_num+xr2_i])/tauxr2)) && !isnan(x[cell_num+xr2_i] + step*( (xr2inf-x[cell_num+xr2_i])/tauxr2))) {
x_temp[cell_num+xr2_i] = x[cell_num+xr2_i] + step*( (xr2inf-x[cell_num+xr2_i])/tauxr2) ;
}
else { x_temp[cell_num+xr2_i] = x[cell_num+xr2_i];}
if ( !isinf(x[cell_num+Rbar_ryr_i] + step*( -k2_ryr*x[cell_num+Cass_i]*x[cell_num+Rbar_ryr_i] + k4_ryr*(1 - x[cell_num+Rbar_ryr_i]))) && !isnan(x[cell_num+Rbar_ryr_i] + step*( -k2_ryr*x[cell_num+Cass_i]*x[cell_num+Rbar_ryr_i] + k4_ryr*(1 - x[cell_num+Rbar_ryr_i])))) {
x_temp[cell_num+Rbar_ryr_i] = x[cell_num+Rbar_ryr_i] + step*( -k2_ryr*x[cell_num+Cass_i]*x[cell_num+Rbar_ryr_i] + k4_ryr*(1 - x[cell_num+Rbar_ryr_i])) ;
}
else { x_temp[cell_num+Rbar_ryr_i] = x[cell_num+Rbar_ryr_i]; }
if ( !isinf(x[cell_num+Cai_i] + step* (Bi*(-(IpCa + ICab - 2*INCX)*1e6*Acap/(2*F*Vmyo) + (VSR/Vmyo)*(Ileak - Iup) + Ixfer))) && !isnan(x[cell_num+Cai_i] + step* (Bi*(-(IpCa + ICab - 2*INCX)*1e6*Acap/(2*F*Vmyo) + (VSR/Vmyo)*(Ileak - Iup) + Ixfer)))) {
x_temp[cell_num+Cai_i] = x[cell_num+Cai_i] + step* (Bi*(-(IpCa + ICab - 2*INCX)*1e6*Acap/(2*F*Vmyo) + (VSR/Vmyo)*(Ileak - Iup) + Ixfer)) ;
}
else { x_temp[cell_num+Cai_i] = x[cell_num+Cai_i]; }
if ( !isinf(x[cell_num+Cass_i] + step*( (Bss*(-ICa*1e6*Acap/(2*F*Vss) + VSR/Vss*Irel - Vmyo/Vss*Ixfer)))) && !isnan(x[cell_num+Cass_i] + step*( (Bss*(-ICa*1e6*Acap/(2*F*Vss) + VSR/Vss*Irel - Vmyo/Vss*Ixfer)))) ) {
x_temp[cell_num+Cass_i] = x[cell_num+Cass_i] + step*( (Bss*(-ICa*1e6*Acap/(2*F*Vss) + VSR/Vss*Irel - Vmyo/Vss*Ixfer))) ;
}
else { x_temp[cell_num+Cass_i] = x[cell_num+Cass_i];}
if ( !isinf(x[cell_num+CaSR_i] + step* (BSR*(Iup - Ileak - Irel))) && !isnan(x[cell_num+CaSR_i] + step* (BSR*(Iup - Ileak - Irel)))) {
x_temp[cell_num+CaSR_i] = x[cell_num+CaSR_i] + step* (BSR*(Iup - Ileak - Irel)) ;
}
else { x_temp[cell_num+CaSR_i] = x[cell_num+CaSR_i]; }
if ( !isinf(x[cell_num+Nai_i] + step*( -(INa + 3*INCX + 3*INaK + INab)*1e6*Acap/(F*Vmyo))) && !isnan(x[cell_num+Nai_i] + step*( -(INa + 3*INCX + 3*INaK + INab)*1e6*Acap/(F*Vmyo)))) {
x_temp[cell_num+Nai_i] = x[cell_num+Nai_i] + step*( -(INa + 3*INCX + 3*INaK + INab)*1e6*Acap/(F*Vmyo)) ;
}
else { x_temp[cell_num+Nai_i] = x[cell_num+Nai_i]; }
if ( !isinf(x[cell_num+Ki_i] + step*(-(Ito + IKs + IKr + IK1 - 2*INaK + IpK)*1e5*Acap/(F*Vmyo))) && !isnan(x[cell_num+Ki_i] + step*(-(Ito + IKs + IKr + IK1 - 2*INaK + IpK)*1e5*Acap/(F*Vmyo)))) {
x_temp[cell_num+Ki_i] = x[cell_num+Ki_i] + step*(-(Ito + IKs + IKr + IK1 - 2*INaK + IpK)*1e5*Acap/(F*Vmyo)) ;
}
else { x_temp[cell_num+Ki_i] = x[cell_num+Ki_i]; }
}
}
__global__ void updateState(double* x, double* x_temp, int num_cells, int cells_per_thread) {
int i;
int idx = cells_per_thread*threadIdx.x;
int variations = blockIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
for (i=1;i<19;i++) {
x[(variations*19*num_cells) + idx +(i*num_cells)] = x_temp[(variations*19*num_cells) + idx +(i*num_cells)];
}
}
}
__global__ void compute_voltage(double* x, double* V, double* Iion, double step, double* randNums, int variations, int length, int width, int num_changing_vars, int time, double stimDur, double stimAmp, int tstim, double* s2time, int cells_per_thread, bool s2_analysis, int s2_loc) {
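// Membrane potential update (forward Euler on the monodomain cable equation):
// V_new = V + step*( rad/(2*rho*Cm*deltx^2) * (sum of neighbor voltage differences) - (Iion + stimulus)/Cm ).
// Edge cells use sealed-end (no-flux) boundaries; the commented "loop" lines give the periodic alternative.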
int num_cells = length*width;
int m;
int n;
double stim = 0.0;
double Istim1 = 0.0;
double Istim2 = 0.0;
double Vnet_R, Vnet_L, Vnet_U, Vnet_D;
double rad = 0.0011 ;
double deltx = 0.01 ;
double rho;
double Cm = 2 ;
double Rmyo;
double gj;
int tstim2;
int idx = cells_per_thread*threadIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
m = (blockIdx.x * num_cells) + idx;
n = (blockIdx.x * num_cells*19) + idx;
if (num_changing_vars==0) {
gj = 1.27 ;
Rmyo = 526;
}
else {
gj = 1.27*randNums[(blockIdx.x*num_changing_vars)+15] ;
Rmyo = 526*randNums[(blockIdx.x*num_changing_vars)+16];
}
rho = 3.14159*pow(rad,2)*(Rmyo+1000/gj)/deltx; // total resistivity
if (s2_analysis) {
tstim2 = s2time[blockIdx.x]/step;
}
if ( time%tstim > (stimDur/step)) {Istim1 = 0.0;}
else { Istim1 = stimAmp;}
if (s2_analysis) {
if ( time>=tstim2 && time<=(stimDur/step)+tstim2) {Istim2 = -150;}
else {Istim2 = 0.0;}
}
// Cable Model
if (width==1) {
if(idx==0) { //first + stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*((x[n+1]-x[n])) - (Iion[n]+Istim1) /Cm )) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*((x[n+1]-x[n])) - (Iion[n]+Istim1) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*((x[n+1]-x[n])) - (Iion[n]+Istim1) /Cm ) ;
//V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n] + x[n+length-1]) - (Iion[n]+Istim1) /Cm ) ; // loop
}
else { V[m] = x[n];}
}
else if(idx==num_cells-1){ //last
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(-x[n] + x[n-1]) - (Iion[n]) /Cm )) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(-x[n] + x[n-1]) - (Iion[n]) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(-x[n] + x[n-1]) - (Iion[n]) /Cm );
//V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1-length] - 2*x[n] + x[n-1]) - (Iion[n]) /Cm ); // loop
}
else { V[m] = x[n]; }
}
else if(idx==1) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm);
}
else { V[m] = x[n]; }
}
else if(idx==2) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm);
}
else { V[m] = x[n]; }
}
//stim2
else if (s2_analysis && s2_loc == idx) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm);
}
else { V[m] = x[n]; }
}
else if (s2_analysis && s2_loc+1 == idx) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm);
}
else { V[m] = x[n]; }
}
else if (s2_analysis && s2_loc-1 == idx) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm);
}
else { V[m] = x[n]; }
}
else {
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*( (x[n-1]-x[n]) + (x[n+1]-x[n]) ) - (Iion[n]) /Cm ) ) && !isnan ((x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*( (x[n-1]-x[n]) + (x[n+1]-x[n]) ) - (Iion[n]) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*( (x[n-1]-x[n]) + (x[n+1]-x[n]) ) - (Iion[n]) /Cm );
}
else { V[m] = x[n]; }
}
}
//Tissue Model
else {
// set which cells will get a stimulus
if (idx==0 || idx==1) { stim=Istim1; }
if (idx==2 || idx==0+length) { stim=Istim1; }
if (idx==1+length || idx==2+length) { stim=Istim1; }
if (idx==0+2*length || idx==1+2*length) { stim=Istim1; }
if (idx==2+2*length) { stim=Istim1; }
if ( threadIdx.x>=0 && threadIdx.x<=length-1 ) { // Top Edge
Vnet_U = 0.0;
}
else {
Vnet_U = x[n-length] - x[n];
}
if ( threadIdx.x>=((width*length)-length) && threadIdx.x<=((width*length)-1) ) { // Bottom Edge
Vnet_D = 0.0;
}
else {
Vnet_D = x[n+length] - x[n];
}
if ( threadIdx.x%length==0 ) { // Left Edge
Vnet_L = 0.0;
//Vnet_L = x[n+length-1] - x[n]; // tissue loop
}
else {
Vnet_L = x[n-1] - x[n];
}
if ( threadIdx.x%length==(length-1) ) { // Right Edge
Vnet_R = 0.0;
//Vnet_R = x[n+1-length] - x[n]; // tissue loop
}
else {
Vnet_R = x[n+1] - x[n];
}
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(Vnet_R+Vnet_L+Vnet_U+Vnet_D) - (Iion[n]+stim) /Cm )) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(Vnet_R+Vnet_L+Vnet_U+Vnet_D) - (Iion[n]+stim) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(Vnet_R+Vnet_L+Vnet_U+Vnet_D) - (Iion[n]+stim) /Cm ) ;
}
else { V[m] = x[n]; }
}
}
}
__global__ void update_voltage(double* x, double* V, int total_cells, int cells_per_thread) {
int idx = cells_per_thread*threadIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
int m = (blockIdx.x * total_cells) + idx;
int n = (blockIdx.x * total_cells*19) + idx;
x[n] = V[m];
}
}
__global__ void computeVelocity(double* voltage, int iterations, int num_cells, double* vel, double time, int length, int width) {
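// Finds when the first and the last cell of the strand first cross the -55 mV activation threshold and
// stores the difference of those activation times (the conduction delay). Dividing the strand length by
// this delay would give conduction velocity; the distance calculation below was left unfinished.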
double startTP = 0.0;
double endTP = 0.0;
double deltx = 0.01 ;
int i,k;
int idx = threadIdx.x;
double distance;
int start = idx*num_cells*iterations;
for (i=0;i<iterations;i++) { // Looking at first cell voltage only
if (voltage[start+i] >= -55) {
startTP = (i-start)*time;
break;
}
}
for (k=(iterations*(num_cells-1));k<(iterations*num_cells);k++) { // Looking at last cell voltage only
if (voltage[start+k] >= -55) {
endTP = ((k-start) -(iterations*(num_cells-1)))*time;
break;
}
}
//distance = ;
vel[idx] =endTP-startTP;
}
__global__ void init_randomNums(hiprandState_t *state) {
int idx = blockDim.x*blockIdx.x + threadIdx.x ;
hiprand_init(1337, idx, 0, &state[idx]);
}
__global__ void make_randomNums(hiprandState_t *state, double* randArray) {
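// Draws one log-normally distributed scale factor per varied parameter: exp(sigma*N(0,1)) has median 1,
// so each parameter is randomly scaled up or down around its baseline value.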
int idx = blockDim.x*blockIdx.x + threadIdx.x ;
double sigma = 0.15; //SD of variation of parameter
randArray[idx] = exp(sigma*hiprand_normal(&state[idx]));
}
__global__ void initialize_time_s2(double begin_time, double interval, double* time_s2) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
time_s2[idx] = begin_time+(interval*idx);
}
__global__ void percentage_excited(double* V_array, int iterations, int num_cells, double* percent, int variations) {
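// One thread per S2 timing: counts how many cells exceed -55 mV at the last stored time point and
// reports the excited fraction of the strand for that premature (S2) stimulus.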
int idx = threadIdx.x;
int last_row = iterations-1;
int start = last_row + (idx*num_cells*iterations);
int num_excited = 0;
int i;
for (i=0;i<num_cells;i++) {
if(V_array[start+(i*iterations)] >= -55) {
num_excited++;
}
}
percent[idx] = ((double)num_excited/(double)num_cells);
}
int main( int argc, const char* argv[] )
{
int i, ii;
int time = 0;
FILE *fV = fopen("tt06 GPU Voltage", "w");
FILE *ft = fopen("tt06 GPU Time", "w");
FILE *output = fopen("tt06 GPU Sensitivity Analysis", "w");
FILE *s2output = fopen("tt06 GPU s2 Analysis", "w");
int index=0;
double* host_vars;
double* dev_vars;
double* dev_ion_currents;
double* dev_x_temp;
double* host_Vtemp;
double* dev_Vtemp;
double* V_array;
double* t_array;
double* dev_V_array;
double* dev_vel;
double* vel;
double* s2_times;
double* s2_times_dev;
double* percent_excited;
double* dev_percent_excited;
hipEvent_t start,stop;
float elapsedTime;
hiprandState_t *rndState;
double* dev_randNums;
double* randNums;
int size;
double begin_time;
double end_time;
double test_interval;
int total_s2_times;
int s2_loc;
//Number of Parameters in the Model
int num_param = 19;
// Assume only running 1 simulation initially
int simulations = 1;
// Time Step Variables
double step = 0.002;
double tend = 10;
int iterations = tend/step;
double skip_time_value = 0.5; //ms
int skip_timept = skip_time_value/step; // skipping time points in voltage array & time array
int total_timepts = iterations/skip_timept;
// Number of Cells
int length = 10;
int width = 1;
int num_cells = length*width;
int cells_per_thread = 1; // for cell numbers > 500, one thread may need to work on more than one cell
//Stimulus Variables
double stimDur = 2.0;
double stimAmp = -80.0;
double stimInterval = 1000;
int tstim = stimInterval/step;
// Sensitivity Analysis?
//int num_changing_vars = 18;
int num_changing_vars = 0;
// S2 Analysis?
bool s2_analysis = false;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
if (s2_analysis) {
begin_time = 900;
end_time = 930;
test_interval = 10;
s2_loc = 5;
total_s2_times = (end_time-begin_time)/test_interval; //make sure not too many threads
simulations = total_s2_times;
s2_times = (double*)malloc(sizeof(double)*total_s2_times);
hipMalloc( &s2_times_dev, sizeof(double)*total_s2_times);
hipLaunchKernelGGL(( initialize_time_s2), dim3(1),dim3(total_s2_times), 0, 0, begin_time, test_interval, s2_times_dev);
hipMemcpy(s2_times, s2_times_dev, total_s2_times*sizeof(double), hipMemcpyDeviceToHost);
}
size = num_param*num_cells*simulations;
if (num_changing_vars != 0) {
simulations = 10;
hipMalloc( &rndState, sizeof(hiprandState_t)*simulations*num_changing_vars);
hipMalloc( &dev_randNums, sizeof(double)*simulations*num_changing_vars);
randNums = (double*)malloc(sizeof(double)*simulations*num_changing_vars);
hipLaunchKernelGGL(( init_randomNums), dim3(simulations),dim3(num_changing_vars), 0, 0, rndState);
hipLaunchKernelGGL(( make_randomNums), dim3(simulations),dim3(num_changing_vars), 0, 0, rndState, dev_randNums);
}
size = num_param*num_cells*simulations;
// vars array contains the voltage & state variables for all cells across all simulations
host_vars = (double *)malloc(sizeof(double)*size);
hipMalloc( &dev_vars, sizeof(double)*size);
// results of the computeState kernel
hipMalloc( &dev_ion_currents, sizeof(double)*num_cells*simulations);
hipMalloc( &dev_x_temp, sizeof(double)*size);
// result of the computeVoltage kernel
host_Vtemp = (double*)malloc(sizeof(double)*num_cells*simulations);
hipMalloc( &dev_Vtemp, sizeof(double)*num_cells*simulations);
V_array = (double*)malloc(sizeof(double)*(total_timepts*num_cells*simulations));
t_array = (double*)malloc(sizeof(double)*(total_timepts*simulations));
fprintf(fV, "V = [ \n");
// Initialize vars array with initial conditions
hipLaunchKernelGGL(( initialConditions), dim3(simulations),dim3((num_cells/cells_per_thread)), 0, 0, dev_vars,num_param,num_cells, cells_per_thread);
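// Main time loop: each step computes the ionic currents and Euler-updated gating/concentration variables
// (computeState), commits them (updateState), advances the membrane voltage with the cable/tissue diffusion
// term plus any stimulus (compute_voltage), and writes V back into the state array (update_voltage).
// Every skip_timept steps the voltages are copied to the host and appended to V_array and the output file.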
while (time<iterations) {
hipLaunchKernelGGL(( computeState), dim3(simulations),dim3((num_cells/cells_per_thread)), 0, 0, dev_vars, dev_ion_currents, num_cells, step, dev_randNums, simulations, dev_x_temp, num_changing_vars, cells_per_thread);
hipLaunchKernelGGL(( updateState), dim3(simulations),dim3((num_cells/cells_per_thread)), 0, 0, dev_vars, dev_x_temp, num_cells, cells_per_thread);
hipLaunchKernelGGL(( compute_voltage), dim3(simulations),dim3((num_cells/cells_per_thread)), 0, 0, dev_vars, dev_Vtemp, dev_ion_currents, step, dev_randNums, simulations, length, width, num_changing_vars, time, stimDur, stimAmp, tstim, s2_times_dev, cells_per_thread, s2_analysis, s2_loc);
hipLaunchKernelGGL(( update_voltage), dim3(simulations),dim3((num_cells/cells_per_thread)), 0, 0, dev_vars, dev_Vtemp, num_cells, cells_per_thread);
//update Voltage and time arrays and write data to file
hipMemcpy(host_Vtemp, dev_Vtemp, num_cells*simulations*sizeof(double), hipMemcpyDeviceToHost);
if (time%skip_timept == 0) {
for (i=0;i<num_cells*simulations;i++) {
V_array[ (i*(iterations/skip_timept)) +index] = host_Vtemp[i];
fprintf(fV, "%f\t ", host_Vtemp[i]);
}
fprintf(fV, "\n");
fprintf(ft, "%f \n", time*step);
for (i=0;i<simulations;i++) {
t_array[(index*simulations)+i] = time*step;
}
index++;
}
time++;
}
fprintf(fV, "]; \n");
/*
The Model Computations are Finished
This last section of code is only writing data to file(s) and cleaning up the memory
*/
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
free(host_vars);
hipFree(dev_vars);
hipFree(dev_ion_currents);
hipFree(dev_x_temp);
free(host_Vtemp);
hipFree(dev_Vtemp);
printf("Elapsed Time = %f s \n",elapsedTime/1000);
printf("\n");
printf("Calculating Simulation outputs...\n");
printf("\n");
if (num_changing_vars != 0) {
vel = (double*)malloc(sizeof(double)*simulations);
hipMalloc( &dev_vel,(sizeof(double)*simulations));
hipMalloc( &dev_V_array, sizeof(double)*(total_timepts*num_cells*simulations));
hipMemcpy(dev_V_array, V_array, sizeof(double)*(total_timepts*num_cells*simulations), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( computeVelocity), dim3(1),dim3(simulations), 0, 0, dev_V_array, total_timepts, num_cells, dev_vel, step*skip_timept, length, width);
hipMemcpy(vel, dev_vel, simulations*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(randNums, dev_randNums, num_changing_vars*simulations*sizeof(double), hipMemcpyDeviceToHost);
fprintf(output, "A = [ \n");
for (i=0;i<simulations;i++) {
for (ii=0;ii<num_changing_vars;ii++) {
fprintf(output, "%f\t", randNums[(i*num_changing_vars)+ii]);
}
fprintf(output, "\n");
}
fprintf(output, "]; \n");
fprintf(output, "\n");
fprintf(output, "Vel = [ \n");
for (i=0;i<simulations;i++) {
fprintf(output, "%f\n", vel[i]);
}
fprintf(output, "]; \n");
hipFree(rndState);
hipFree(dev_randNums);
free(randNums);
free(vel);
hipFree(dev_vel);
}
if (s2_analysis) {
hipMalloc( &dev_V_array, sizeof(double)*(total_timepts*num_cells*simulations));
hipMemcpy(dev_V_array, V_array, sizeof(double)*(total_timepts*num_cells*simulations), hipMemcpyHostToDevice);
percent_excited = (double*)malloc(sizeof(double)*total_s2_times);
hipMalloc( &dev_percent_excited, sizeof(double)*total_s2_times);
hipLaunchKernelGGL(( percentage_excited), dim3(1),dim3(total_s2_times), 0, 0, dev_V_array, total_timepts, num_cells, dev_percent_excited, simulations);
hipMemcpy(percent_excited, dev_percent_excited, total_s2_times*sizeof(double), hipMemcpyDeviceToHost);
fprintf(s2output, "A = [ \n");
for (i=0;i<simulations;i++) {
fprintf(s2output, "%f\n", s2_times[i]);
}
fprintf(s2output, "]; \n");
fprintf(s2output, "\n");
fprintf(s2output, "% = [ \n");
for (i=0;i<simulations;i++) {
fprintf(s2output, "%f\n", percent_excited[i]);
}
fprintf(s2output, "]; \n");
free(percent_excited);
hipFree(dev_percent_excited);
}
free(V_array);
hipFree(dev_V_array);
printf("Program is Done\n");
}
|
c49bbc5b3448d2b2335fc40d58056e6dbbff5d33.cu
|
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <curand_kernel.h>
__global__ void initialConditions(double* vars, int num_param, int num_cells, int cells_per_thread) {
double V = -83.5092;
double m = 0.0025 ;
double h = 0.6945 ;
double j = 0.6924 ;
double d = 4.2418e-005 ;
double f = 0.9697 ;
double f2 = 0.9784 ;
double fCass = 0.9999 ;
double r = 3.2195e-008 ;
double s = 1.0000 ;
double xs = 0.0038 ;
double xr1 = 3.1298e-004 ;
double xr2 = 0.4534 ;
double Rbar_ryr = 0.9816 ;
double Cai = 0.04 ;
double Cass = 0.2381 ;
double CaSR = 3.6426e+003 ;
double Nai = 3.8067e+003 ;
double Ki = 1.2369e+005 ;
// Within each test, the variables are divided as follows
// V(cell1), V(cell2), V(cell3) ... V(cellLast), m(cell1), m(cell2), ... m(cellLast) ... for all 19 parameters
int idx = threadIdx.x*cells_per_thread;
int simulations = blockIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
vars[(simulations*num_param*num_cells) + idx +(0*num_cells)] = V;
vars[(simulations*num_param*num_cells) + idx +(1*num_cells)] = m;
vars[(simulations*num_param*num_cells) + idx +(2*num_cells)] = h;
vars[(simulations*num_param*num_cells) + idx +(3*num_cells)] = j;
vars[(simulations*num_param*num_cells) + idx +(4*num_cells)] = d;
vars[(simulations*num_param*num_cells) + idx +(5*num_cells)] = f;
vars[(simulations*num_param*num_cells) + idx +(6*num_cells)] = f2;
vars[(simulations*num_param*num_cells) + idx +(7*num_cells)] = fCass;
vars[(simulations*num_param*num_cells) + idx +(8*num_cells)] = r;
vars[(simulations*num_param*num_cells) + idx +(9*num_cells)] = s;
vars[(simulations*num_param*num_cells) + idx +(10*num_cells)] = xs;
vars[(simulations*num_param*num_cells) + idx +(11*num_cells)] = xr1;
vars[(simulations*num_param*num_cells) + idx +(12*num_cells)] = xr2;
vars[(simulations*num_param*num_cells) + idx +(13*num_cells)] = Rbar_ryr;
vars[(simulations*num_param*num_cells) + idx +(14*num_cells)] = Cai;
vars[(simulations*num_param*num_cells) + idx +(15*num_cells)] = Cass;
vars[(simulations*num_param*num_cells) + idx +(16*num_cells)] = CaSR;
vars[(simulations*num_param*num_cells) + idx +(17*num_cells)] = Nai;
vars[(simulations*num_param*num_cells) + idx +(18*num_cells)] = Ki;
}
}
__global__ void computeState(double* x, double* ion_current, int total_cells, double step, double* randNums, int variations, double* x_temp, int num_changing_vars, int cells_per_thread) {
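// Evaluates the ten Tusscher (TT06) ionic currents for one cell, writes their sum to ion_current for the
// voltage update, and advances the 18 gating and concentration state variables with a forward Euler step.
// Each update is guarded by isinf/isnan checks so a non-finite step falls back to the previous value.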
int idx = cells_per_thread*threadIdx.x;
int cell_num;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
cell_num = (blockIdx.x*total_cells*19) + idx;
//Index Variables to make life easier
//Array is categorized by blocks of size=total_cells, each block contains the values of one parameter across the cells
int V_i = 0*total_cells ;
int m_i = 1*total_cells ;
int h_i = 2*total_cells ;
int j_i = 3*total_cells ;
int d_i = 4*total_cells ;
int f_i = 5*total_cells ;
int f2_i = 6*total_cells ;
int fCass_i = 7*total_cells ;
int r_i = 8*total_cells ;
int s_i = 9*total_cells ;
int xs_i = 10*total_cells ;
int xr1_i = 11*total_cells ;
int xr2_i = 12*total_cells ;
int Rbar_ryr_i = 13*total_cells ;
int Cai_i = 14*total_cells ;
int Cass_i = 15*total_cells ;
int CaSR_i = 16*total_cells ;
int Nai_i = 17*total_cells ;
int Ki_i = 18*total_cells ;
double ENa, EK, ECa, EKs, INa, ICa, Ito, IKs, IKr;
double aK1, bK1, xK1, IK1;
double minf, am, bm, taum;
double hinf, jinf, ad, bd, gd, taud, finf, af, bf, gf, tauf;
double f2inf, af2, bf2, gf2, tauf2, fCassinf, taufCass;
double rinf, taur, sinf, taus, xsinf, axs, bxs, tauxs;
double axr1, bxr1, tauxr1, xr1inf, xr2inf, axr2, bxr2, tauxr2;
double Ileak, Iup, kcasr, k1_ryr, k2_ryr, O_ryr;
double Irel, Ixfer, Bi, Bss, BSR;
double ah, bh, aj, bj;
double tauh, tauj, dinf, INCX, fNaK, INaK, IpCa, IpK, INab, ICab, Iion;
char celltype;
double F = 96.4853415; // Faraday's constant, coulombs/mmol
double R = 8.314472; //gas constant, J/(K mol)
double T = 310.0; // absolute temperature, K
double RTF = R*T/F;
double Acap = 5.6297*3.280e-5; // cm2
double Vmyo = 16.404; // pL
double VSR = 1.094; // pL
double Vss = 0.05468 ; // pL
double Ko = 5400 ; // uM
double Nao = 140000 ; // uM
double Cao = 2000 ; // uM
double PCa_;
double GNa_;
double GK1_;
double GKr_;
double GpK_;
double GpCa_;
double GNab_;
double GCab_;
double Gto_;
double GKs_;
double INaK_;
double kNaCa;
double Vleak;
double Iup_;
double Vrel;
double pKNa = 0.03 ; // relative permeability, Na to K
double KmNa = 87500 ; // uM
double KmCa = 1380 ; // uM
double ksat = 0.1 ; // unitless
double alpha_ncx = 2.5 ; // unitless
double eta = 0.35 ; // unitless, actually gamma in paper
double KmNai = 40000 ; // Na-K pump // uM
double KmKo = 1000 ; // Sarcolemmal Ca pump // uM
double KpCa = 0.5 ; // SERCA // uM
double Kmup = 0.25 ; // uM
double Vxfer = 0.0038 ; // ms^-1
double k1_ryr_prime = 0.15e-6; // uM-2 ms-1
double k2_ryr_prime = 0.045e-3 ; // uM-1 ms-1
double k3_ryr = 0.06 ; // ms-1
double k4_ryr = 0.005 ; // ms-1 as per KWTT source code
double maxsr = 2.5 ; // dimensionless
double minsr = 1 ; // dimensionless
double EC_ryr = 1500 ; // uM
double Bufc = 200 ; // uM
double Kbufc = 1 ; // uM
double Bufss = 400 ; // uM
double Kbufss = 0.25 ; // uM
double BufSR = 10000 ; // uM
double KbufSR = 300 ; // uM
celltype = 'p' ; //epi
//celltype = 'n' ; //endo
//celltype = 'm' ; //mid
if (num_changing_vars==0) {
GNa_ = 14.838; // nS/pF
GK1_ = 5.405; // nS/pF
GKr_ = 0.153; // nS/pF
GpK_ = 0.0146; // nS/pF
GpCa_ = 0.1238; // nS/pF
GNab_ = 2.9e-4; // nS/pF
GCab_ = 5.92e-4; // nS/pF
if (celltype == 'n') { //endo
Gto_ = 0.073 ;
GKs_ = 0.392 ;
}
else if (celltype == 'p') { //epi
Gto_ = 0.294 ;
GKs_ = 0.392 ;
}
else if (celltype == 'm') { //mid
Gto_ = 0.294; // nS/pF
GKs_ = 0.098; // nS/pF
}
PCa_ = 3.980e-5;
INaK_ = 2.724; // pA/pF
kNaCa = 1000 ; // pA/pF
Vleak = 3.6e-4 ; // ms^-1
Iup_ = 6.375 ; // uM/ms
Vrel = 0.102 ; // ms^-1 as per KWTT source code
}
else {
GNa_ = 14.838*randNums[(blockIdx.x*num_changing_vars)+0];
GK1_ = 5.405*randNums[(blockIdx.x*num_changing_vars)+1];
GKr_ = 0.153*randNums[(blockIdx.x*num_changing_vars)+2];
GpK_ = 0.0146*randNums[(blockIdx.x*num_changing_vars)+3];
GpCa_ = 0.1238*randNums[(blockIdx.x*num_changing_vars)+4];
GNab_ = 2.9e-4*randNums[(blockIdx.x*num_changing_vars)+5];
GCab_ = 5.92e-4*randNums[(blockIdx.x*num_changing_vars)+6];
if (celltype == 'n') { //endo
Gto_ = 0.073*randNums[(blockIdx.x*num_changing_vars)+7];
GKs_ = 0.392*randNums[(blockIdx.x*num_changing_vars)+8];
}
else if (celltype == 'p') { //epi
Gto_ = 0.294*randNums[(blockIdx.x*num_changing_vars)+7];
GKs_ = 0.392*randNums[(blockIdx.x*num_changing_vars)+8];
}
else if (celltype == 'm') { //mid
Gto_ = 0.294*randNums[(blockIdx.x*num_changing_vars)+7];
GKs_ = 0.098*randNums[(blockIdx.x*num_changing_vars)+8];
}
PCa_ = 3.980e-5*randNums[(blockIdx.x*num_changing_vars)+9];
INaK_ = 2.724*randNums[(blockIdx.x*num_changing_vars)+10];
kNaCa = 1000*randNums[(blockIdx.x*num_changing_vars)+11];
Vleak = 3.6e-4*randNums[(blockIdx.x*num_changing_vars)+12];
Iup_ = 6.375*randNums[(blockIdx.x*num_changing_vars)+13] ;
Vrel = 0.102*randNums[(blockIdx.x*num_changing_vars)+14] ;
}
ENa = RTF*log(Nao/x[cell_num+Nai_i]) ;
EK = RTF*log(Ko/x[cell_num+Ki_i]) ;
ECa = 0.5*RTF*log(Cao/x[cell_num+Cai_i]) ;
EKs = RTF*log((Ko + pKNa*Nao)/(x[cell_num+Ki_i] + pKNa*x[cell_num+Nai_i])) ;
INa = GNa_*pow(x[cell_num+m_i],3)*x[cell_num+h_i]*x[cell_num+j_i]*(x[cell_num+V_i] - ENa) ;
ICa = PCa_*x[cell_num+d_i]*x[cell_num+f_i]*x[cell_num+f2_i]*x[cell_num+fCass_i]*4*F/RTF*(x[cell_num+V_i]-15)*(0.25*x[cell_num+Cass_i]*exp(2*(x[cell_num+V_i]-15)/RTF) - Cao)/ (exp(2*(x[cell_num+V_i]-15)/RTF) - 1) ;
Ito = Gto_*x[cell_num+r_i]*x[cell_num+s_i]*(x[cell_num+V_i]-EK) ;
IKs = GKs_*pow(x[cell_num+xs_i],2)*(x[cell_num+V_i]-EKs) ;
IKr = GKr_*sqrt(Ko/5400)*x[cell_num+xr1_i]*x[cell_num+xr2_i]*(x[cell_num+V_i]-EK) ;
aK1 = 0.1/(exp(0.06*(x[cell_num+V_i]-EK-200)) + 1) ;
bK1 = ( 3*exp(2e-4*(x[cell_num+V_i]-EK+100)) + exp(0.1*(x[cell_num+V_i]-EK-10)) )/ (exp(-0.5*(x[cell_num+V_i]-EK)) + 1) ;
xK1 = aK1/(aK1+bK1) ;
IK1 = GK1_*sqrt(Ko/5400)*xK1*(x[cell_num+V_i]-EK) ;
INCX = kNaCa*pow((pow(KmNa,3) + pow(Nao,3)),-1)*pow((KmCa + Cao),-1)*pow((ksat*exp((eta-1)*x[cell_num+V_i]/RTF) + 1),-1)* (exp(eta*x[cell_num+V_i]/RTF)*pow(x[cell_num+Nai_i],3)*Cao - exp((eta-1)*x[cell_num+V_i]/RTF)*pow(Nao,3)*x[cell_num+Cai_i]*alpha_ncx) ;
fNaK = 1/(1 + 0.1245*exp(-0.1*x[cell_num+V_i]/RTF) + 0.0353*exp(-x[cell_num+V_i]/RTF)) ;
INaK = INaK_*Ko*x[cell_num+Nai_i]*fNaK/( (Ko + KmKo)*(x[cell_num+Nai_i] + KmNai) ) ;
IpCa = GpCa_*x[cell_num+Cai_i]/(x[cell_num+Cai_i] + KpCa) ;
IpK = GpK_*(x[cell_num+V_i]-EK)/(exp(-(x[cell_num+V_i]-25)/5.98) + 1) ;
INab = GNab_*(x[cell_num+V_i]-ENa) ;
ICab = GCab_*(x[cell_num+V_i]-ECa) ;
Iion = INa + ICa + Ito + IKs + IKr + IK1 + INCX + INaK + IpCa + IpK + INab + ICab ;
minf = 1/pow((exp((-56.86-x[cell_num+V_i])/9.03) + 1),2) ;
am = 1/(exp((-60-x[cell_num+V_i])/5) + 1) ;
bm = 0.1/(exp((x[cell_num+V_i]+35)/5) + 1) + 0.1/(exp((x[cell_num+V_i]-50)/200) + 1) ;
taum = am*bm ;
hinf = ( 1/pow((exp((x[cell_num+V_i]+71.55)/7.43) + 1),2)) ;
jinf = ( 1/pow((exp((x[cell_num+V_i]+71.55)/7.43) + 1),2)) ;
if (x[cell_num+V_i] >= -40) {
ah = 0 ;
bh = 0.77/(0.13*(exp((-x[cell_num+V_i]-10.66)/11.1) + 1)) ;
aj = 0 ;
bj = 0.6*exp(0.057*x[cell_num+V_i])/(exp(-0.1*(x[cell_num+V_i]+32)) + 1) ;
}
else {
ah = 0.057*exp((-x[cell_num+V_i]-80)/6.8) ;
bh = 2.7*exp(0.079*x[cell_num+V_i])+3.1e5*exp(0.3485*x[cell_num+V_i]) ;
aj = (-2.5428e4*exp(0.2444*x[cell_num+V_i])-6.948e-6*exp(-0.04391*x[cell_num+V_i]))*(x[cell_num+V_i]+37.78)/ (exp(0.311*(x[cell_num+V_i]+79.23))+1) ;
bj = 0.02424*exp(-0.01052*x[cell_num+V_i])/(exp(-0.1378*(x[cell_num+V_i]+40.14)) + 1);
}
tauh = 1/(ah+bh) ;
tauj = 1/(aj+bj) ;
dinf = 1/(exp(-(x[cell_num+V_i]+8)/7.5) + 1) ;
ad = 1.4/(exp(-(x[cell_num+V_i]+35)/13) + 1) + 0.25 ;
bd = 1.4/(exp((x[cell_num+V_i]+5)/5) + 1) ;
gd = 1/(exp((50-x[cell_num+V_i])/20) + 1) ;
taud = (ad*bd + gd) ;
finf = 1/(exp((x[cell_num+V_i]+20)/7) + 1) ;
af = 1102.5*exp(-pow((x[cell_num+V_i]+27),2)/225) ;
bf = 200/(1 + exp((13-x[cell_num+V_i])/10)) ;
gf = 180/(1 + exp((x[cell_num+V_i]+30)/10)) + 20 ;
tauf = (af + bf + gf) ;
f2inf = 0.67/(exp((x[cell_num+V_i]+35)/7) + 1) + 0.33 ;
af2 = 600*exp(-pow((x[cell_num+V_i]+25),2)/170) ;
bf2 = 31/(1 + exp((25-x[cell_num+V_i])/10)) ;
gf2 = 16/(1 + exp((x[cell_num+V_i]+30)/10)) ;
tauf2 = (af2 + bf2 + gf2) ;
fCassinf = 0.6/(1 + pow((x[cell_num+Cass_i]/50),2)) + 0.4 ;
taufCass = 80/(1 + pow((x[cell_num+Cass_i]/50),2)) + 2 ;
rinf = 1/(exp((20-x[cell_num+V_i])/6) + 1) ;
taur = (9.5*exp(-pow((x[cell_num+V_i]+40),2)/1800) + 0.8) ;
sinf = 1/(exp((x[cell_num+V_i]+20)/5) + 1) ;
taus = (85*exp(-pow((x[cell_num+V_i]+45),2)/320) + 5/(exp((x[cell_num+V_i]-20)/5) + 1) + 3) ;
xsinf = 1/(exp(-(x[cell_num+V_i]+5)/14) + 1) ;
axs = 1400/sqrt (exp(-(x[cell_num+V_i]-5)/6) + 1) ;
bxs = 1/(exp((x[cell_num+V_i]-35)/15) + 1) ;
tauxs = (axs*bxs + 80) ;
xr1inf = 1/(exp(-(x[cell_num+V_i]+26)/7) + 1) ;
axr1 = 450/(exp(-(x[cell_num+V_i]+45)/10) + 1) ;
bxr1 = 6/(exp((x[cell_num+V_i]+30)/11.5) + 1) ;
tauxr1 = (axr1*bxr1) ;
xr2inf = 1/(exp((x[cell_num+V_i]+88)/24) + 1) ;
axr2 = 3/(exp(-(x[cell_num+V_i]+60)/20) + 1) ;
bxr2 = 1.12/(exp((x[cell_num+V_i]-60)/20) + 1) ;
tauxr2 = (axr2*bxr2) ;
Ileak = Vleak*(x[cell_num+CaSR_i] - x[cell_num+Cai_i]) ;
Iup = Iup_/(pow((Kmup/x[cell_num+Cai_i]),2) + 1) ;
kcasr = maxsr - (maxsr - minsr)/(1 + pow((EC_ryr/x[cell_num+CaSR_i]),2)) ;
k1_ryr = k1_ryr_prime/kcasr ;
k2_ryr = k2_ryr_prime*kcasr ;
O_ryr = k1_ryr*pow(x[cell_num+Cass_i],2)*x[cell_num+Rbar_ryr_i]/(k3_ryr + k1_ryr*pow(x[cell_num+Cass_i],2)) ;
Irel = Vrel*O_ryr*(x[cell_num+CaSR_i] - x[cell_num+Cass_i]) ;
Ixfer = Vxfer*(x[cell_num+Cass_i] - x[cell_num+Cai_i]) ;
Bi = pow((1 + Bufc*Kbufc/pow((Kbufc + x[cell_num+Cai_i]),2)),-1) ;
Bss = pow((1 + Bufss*Kbufss/pow((Kbufss + x[cell_num+Cass_i]),2)),-1) ;
BSR = pow((1 + BufSR*KbufSR/pow((KbufSR + x[cell_num+CaSR_i]),2)),-1) ;
ion_current[cell_num] = Iion;
//new states into temp array
if ( !isinf(x[cell_num+m_i] + step*((minf-x[cell_num+m_i])/taum)) && !isnan(x[cell_num+m_i] + step*((minf-x[cell_num+m_i])/taum))) {
x_temp[cell_num+m_i] = x[cell_num+m_i] + step*((minf-x[cell_num+m_i])/taum) ;
}
else { x_temp[cell_num+m_i] = x[cell_num+m_i]; }
if ( !isinf(x[cell_num+h_i] + step*( (hinf-x[cell_num+h_i])/tauh)) && !isnan(x[cell_num+h_i] + step*( (hinf-x[cell_num+h_i])/tauh))) {
x_temp[cell_num+h_i] =x[cell_num+h_i] + step*( (hinf-x[cell_num+h_i])/tauh) ;
}
else { x_temp[cell_num+h_i] =x[cell_num+h_i]; }
if ( !isinf( x[cell_num+j_i] + step*( (jinf-x[cell_num+j_i])/tauj)) && !isnan( x[cell_num+j_i] + step*( (jinf-x[cell_num+j_i])/tauj))) {
x_temp[cell_num+j_i] = x[cell_num+j_i] + step*( (jinf-x[cell_num+j_i])/tauj) ;
}
else { x_temp[cell_num+j_i] = x[cell_num+j_i]; }
if ( !isinf(x[cell_num+d_i] + step*( (dinf-x[cell_num+d_i])/taud)) && !isnan(x[cell_num+d_i] + step*( (dinf-x[cell_num+d_i])/taud))) {
x_temp[cell_num+d_i] = x[cell_num+d_i] + step*( (dinf-x[cell_num+d_i])/taud) ;
}
else { x_temp[cell_num+d_i] = x[cell_num+d_i]; }
if ( !isinf(x[cell_num+f_i] + step*((finf-x[cell_num+f_i])/tauf)) && !isnan(x[cell_num+f_i] + step*((finf-x[cell_num+f_i])/tauf))) {
x_temp[cell_num+f_i] = x[cell_num+f_i] + step*((finf-x[cell_num+f_i])/tauf) ;
}
else { x_temp[cell_num+f_i] = x[cell_num+f_i]; }
if ( !isinf(x[cell_num+f2_i] + step*( (f2inf-x[cell_num+f2_i])/tauf2)) && !isnan(x[cell_num+f2_i] + step*( (f2inf-x[cell_num+f2_i])/tauf2))) {
x_temp[cell_num+f2_i] = x[cell_num+f2_i] + step*( (f2inf-x[cell_num+f2_i])/tauf2) ;
}
else { x_temp[cell_num+f2_i] = x[cell_num+f2_i]; }
if ( !isinf(x[cell_num+fCass_i] + step*( (fCassinf-x[cell_num+fCass_i])/taufCass)) && !isnan(x[cell_num+fCass_i] + step*( (fCassinf-x[cell_num+fCass_i])/taufCass))) {
x_temp[cell_num+fCass_i] = x[cell_num+fCass_i] + step*( (fCassinf-x[cell_num+fCass_i])/taufCass) ;
}
else { x_temp[cell_num+fCass_i] = x[cell_num+fCass_i]; }
if ( !isinf(x[cell_num+r_i] + step*((rinf-x[cell_num+r_i])/taur)) && !isnan(x[cell_num+r_i] + step*((rinf-x[cell_num+r_i])/taur))) {
x_temp[cell_num+r_i] = x[cell_num+r_i] + step*((rinf-x[cell_num+r_i])/taur) ;
}
else { x_temp[cell_num+r_i] = x[cell_num+r_i]; }
if ( !isinf(x[cell_num+s_i] + step*((sinf-x[cell_num+s_i])/taus)) && !isnan(x[cell_num+s_i] + step*((sinf-x[cell_num+s_i])/taus))) {
x_temp[cell_num+s_i] = x[cell_num+s_i] + step*((sinf-x[cell_num+s_i])/taus) ;
}
else { x_temp[cell_num+s_i] = x[cell_num+s_i]; }
if ( !isinf(x[cell_num+xs_i] + step*((xsinf-x[cell_num+xs_i])/tauxs)) && !isnan(x[cell_num+xs_i] + step*((xsinf-x[cell_num+xs_i])/tauxs))) {
x_temp[cell_num+xs_i] = x[cell_num+xs_i] + step*((xsinf-x[cell_num+xs_i])/tauxs) ;
}
else { x_temp[cell_num+xs_i] = x[cell_num+xs_i]; }
if ( !isinf(x[cell_num+xr1_i] + step*( (xr1inf-x[cell_num+xr1_i])/tauxr1)) && !isnan(x[cell_num+xr1_i] + step*( (xr1inf-x[cell_num+xr1_i])/tauxr1))) {
x_temp[cell_num+xr1_i] = x[cell_num+xr1_i] + step*( (xr1inf-x[cell_num+xr1_i])/tauxr1) ;
}
else { x_temp[cell_num+xr1_i] = x[cell_num+xr1_i]; }
if ( !isinf(x[cell_num+xr2_i] + step*( (xr2inf-x[cell_num+xr2_i])/tauxr2)) && !isnan(x[cell_num+xr2_i] + step*( (xr2inf-x[cell_num+xr2_i])/tauxr2))) {
x_temp[cell_num+xr2_i] = x[cell_num+xr2_i] + step*( (xr2inf-x[cell_num+xr2_i])/tauxr2) ;
}
else { x_temp[cell_num+xr2_i] = x[cell_num+xr2_i];}
if ( !isinf(x[cell_num+Rbar_ryr_i] + step*( -k2_ryr*x[cell_num+Cass_i]*x[cell_num+Rbar_ryr_i] + k4_ryr*(1 - x[cell_num+Rbar_ryr_i]))) && !isnan(x[cell_num+Rbar_ryr_i] + step*( -k2_ryr*x[cell_num+Cass_i]*x[cell_num+Rbar_ryr_i] + k4_ryr*(1 - x[cell_num+Rbar_ryr_i])))) {
x_temp[cell_num+Rbar_ryr_i] = x[cell_num+Rbar_ryr_i] + step*( -k2_ryr*x[cell_num+Cass_i]*x[cell_num+Rbar_ryr_i] + k4_ryr*(1 - x[cell_num+Rbar_ryr_i])) ;
}
else { x_temp[cell_num+Rbar_ryr_i] = x[cell_num+Rbar_ryr_i]; }
if ( !isinf(x[cell_num+Cai_i] + step* (Bi*(-(IpCa + ICab - 2*INCX)*1e6*Acap/(2*F*Vmyo) + (VSR/Vmyo)*(Ileak - Iup) + Ixfer))) && !isnan(x[cell_num+Cai_i] + step* (Bi*(-(IpCa + ICab - 2*INCX)*1e6*Acap/(2*F*Vmyo) + (VSR/Vmyo)*(Ileak - Iup) + Ixfer)))) {
x_temp[cell_num+Cai_i] = x[cell_num+Cai_i] + step* (Bi*(-(IpCa + ICab - 2*INCX)*1e6*Acap/(2*F*Vmyo) + (VSR/Vmyo)*(Ileak - Iup) + Ixfer)) ;
}
else { x_temp[cell_num+Cai_i] = x[cell_num+Cai_i]; }
if ( !isinf(x[cell_num+Cass_i] + step*( (Bss*(-ICa*1e6*Acap/(2*F*Vss) + VSR/Vss*Irel - Vmyo/Vss*Ixfer)))) && !isnan(x[cell_num+Cass_i] + step*( (Bss*(-ICa*1e6*Acap/(2*F*Vss) + VSR/Vss*Irel - Vmyo/Vss*Ixfer)))) ) {
x_temp[cell_num+Cass_i] = x[cell_num+Cass_i] + step*( (Bss*(-ICa*1e6*Acap/(2*F*Vss) + VSR/Vss*Irel - Vmyo/Vss*Ixfer))) ;
}
else { x_temp[cell_num+Cass_i] = x[cell_num+Cass_i];}
if ( !isinf(x[cell_num+CaSR_i] + step* (BSR*(Iup - Ileak - Irel))) && !isnan(x[cell_num+CaSR_i] + step* (BSR*(Iup - Ileak - Irel)))) {
x_temp[cell_num+CaSR_i] = x[cell_num+CaSR_i] + step* (BSR*(Iup - Ileak - Irel)) ;
}
else { x_temp[cell_num+CaSR_i] = x[cell_num+CaSR_i]; }
if ( !isinf(x[cell_num+Nai_i] + step*( -(INa + 3*INCX + 3*INaK + INab)*1e6*Acap/(F*Vmyo))) && !isnan(x[cell_num+Nai_i] + step*( -(INa + 3*INCX + 3*INaK + INab)*1e6*Acap/(F*Vmyo)))) {
x_temp[cell_num+Nai_i] = x[cell_num+Nai_i] + step*( -(INa + 3*INCX + 3*INaK + INab)*1e6*Acap/(F*Vmyo)) ;
}
else { x_temp[cell_num+Nai_i] = x[cell_num+Nai_i]; }
if ( !isinf(x[cell_num+Ki_i] + step*(-(Ito + IKs + IKr + IK1 - 2*INaK + IpK)*1e5*Acap/(F*Vmyo))) && !isnan(x[cell_num+Ki_i] + step*(-(Ito + IKs + IKr + IK1 - 2*INaK + IpK)*1e5*Acap/(F*Vmyo)))) {
x_temp[cell_num+Ki_i] = x[cell_num+Ki_i] + step*(-(Ito + IKs + IKr + IK1 - 2*INaK + IpK)*1e5*Acap/(F*Vmyo)) ;
}
else { x_temp[cell_num+Ki_i] = x[cell_num+Ki_i]; }
}
}
__global__ void updateState(double* x, double* x_temp, int num_cells, int cells_per_thread) {
int i;
int idx = cells_per_thread*threadIdx.x;
int variations = blockIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
for (i=1;i<19;i++) {
x[(variations*19*num_cells) + idx +(i*num_cells)] = x_temp[(variations*19*num_cells) + idx +(i*num_cells)];
}
}
}
__global__ void compute_voltage(double* x, double* V, double* Iion, double step, double* randNums, int variations, int length, int width, int num_changing_vars, int time, double stimDur, double stimAmp, int tstim, double* s2time, int cells_per_thread, bool s2_analysis, int s2_loc) {
int num_cells = length*width;
int m;
int n;
double stim = 0.0;
double Istim1 = 0.0;
double Istim2 = 0.0;
double Vnet_R, Vnet_L, Vnet_U, Vnet_D;
double rad = 0.0011 ;
double deltx = 0.01 ;
double rho;
double Cm = 2 ;
double Rmyo;
double gj;
int tstim2;
int idx = cells_per_thread*threadIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
m = (blockIdx.x * num_cells) + idx;
n = (blockIdx.x * num_cells*19) + idx;
if (num_changing_vars==0) {
gj = 1.27 ;
Rmyo = 526;
}
else {
gj = 1.27*randNums[(blockIdx.x*num_changing_vars)+15] ;
Rmyo = 526*randNums[(blockIdx.x*num_changing_vars)+16];
}
rho = 3.14159*pow(rad,2)*(Rmyo+1000/gj)/deltx; // total resistivity
if (s2_analysis) {
tstim2 = s2time[blockIdx.x]/step;
}
if ( time%tstim > (stimDur/step)) {Istim1 = 0.0;}
else { Istim1 = stimAmp;}
if (s2_analysis) {
if ( time>=tstim2 && time<=(stimDur/step)+tstim2) {Istim2 = -150;}
else {Istim2 = 0.0;}
}
// Cable Model
if (width==1) {
if(idx==0) { //first + stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*((x[n+1]-x[n])) - (Iion[n]+Istim1) /Cm )) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*((x[n+1]-x[n])) - (Iion[n]+Istim1) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*((x[n+1]-x[n])) - (Iion[n]+Istim1) /Cm ) ;
//V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n] + x[n+length-1]) - (Iion[n]+Istim1) /Cm ) ; // loop
}
else { V[m] = x[n];}
}
else if(idx==num_cells-1){ //last
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(-x[n] + x[n-1]) - (Iion[n]) /Cm )) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(-x[n] + x[n-1]) - (Iion[n]) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(-x[n] + x[n-1]) - (Iion[n]) /Cm );
//V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1-length] - 2*x[n] + x[n-1]) - (Iion[n]) /Cm ); // loop
}
else { V[m] = x[n]; }
}
else if(idx==1) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm);
}
else { V[m] = x[n]; }
}
else if(idx==2) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim1)/ Cm);
}
else { V[m] = x[n]; }
}
//stim2
else if (s2_analysis && s2_loc == idx) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm);
}
else { V[m] = x[n]; }
}
else if (s2_analysis && s2_loc+1 == idx) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm);
}
else { V[m] = x[n]; }
}
else if (s2_analysis && s2_loc-1 == idx) { //need stimulus
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm)) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(x[n+1]-2*x[n]+x[n-1]) - (Iion[n]+Istim2)/ Cm);
}
else { V[m] = x[n]; }
}
else {
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*( (x[n-1]-x[n]) + (x[n+1]-x[n]) ) - (Iion[n]) /Cm ) ) && !isnan ((x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*( (x[n-1]-x[n]) + (x[n+1]-x[n]) ) - (Iion[n]) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*( (x[n-1]-x[n]) + (x[n+1]-x[n]) ) - (Iion[n]) /Cm );
}
else { V[m] = x[n]; }
}
}
//Tissue Model
else {
// set which cells will get a stimulus
if (idx==0 || idx==1) { stim=Istim1; }
if (idx==2 || idx==0+length) { stim=Istim1; }
if (idx==1+length || idx==2+length) { stim=Istim1; }
if (idx==0+2*length || idx==1+2*length) { stim=Istim1; }
if (idx==2+2*length) { stim=Istim1; }
if ( threadIdx.x>=0 && threadIdx.x<=length-1 ) { // Top Edge
Vnet_U = 0.0;
}
else {
Vnet_U = x[n-length] - x[n];
}
if ( threadIdx.x>=((width*length)-length) && threadIdx.x<=((width*length)-1) ) { // Bottom Edge
Vnet_D = 0.0;
}
else {
Vnet_D = x[n+length] - x[n];
}
if ( threadIdx.x%length==0 ) { // Left Edge
Vnet_L = 0.0;
//Vnet_L = x[n+length-1] - x[n]; // tissue loop
}
else {
Vnet_L = x[n-1] - x[n];
}
if ( threadIdx.x%length==(length-1) ) { // Right Edge
Vnet_R = 0.0;
//Vnet_R = x[n+1-length] - x[n]; // tissue loop
}
else {
Vnet_R = x[n+1] - x[n];
}
if ( !isinf( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(Vnet_R+Vnet_L+Vnet_U+Vnet_D) - (Iion[n]+stim) /Cm )) && !isnan( (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(Vnet_R+Vnet_L+Vnet_U+Vnet_D) - (Iion[n]+stim) /Cm )) ) {
V[m] = (x[n]) + (step)*( rad/(2*rho*Cm*deltx*deltx)*(Vnet_R+Vnet_L+Vnet_U+Vnet_D) - (Iion[n]+stim) /Cm ) ;
}
else { V[m] = x[n]; }
}
}
}
__global__ void update_voltage(double* x, double* V, int total_cells, int cells_per_thread) {
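// Copies the freshly computed membrane voltages back into the V slot (offset 0) of the state array
// so the next computeState call sees the updated potential.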
int idx = cells_per_thread*threadIdx.x;
int limit = idx+cells_per_thread;
for (;idx<limit;idx++) {
int m = (blockIdx.x * total_cells) + idx;
int n = (blockIdx.x * total_cells*19) + idx;
x[n] = V[m];
}
}
__global__ void computeVelocity(double* voltage, int iterations, int num_cells, double* vel, double time, int length, int width) {
double startTP = 0.0;
double endTP = 0.0;
double deltx = 0.01 ;
int i,k;
int idx = threadIdx.x;
double distance;
int start = idx*num_cells*iterations;
for (i=0;i<iterations;i++) { // Looking at first cell voltage only
if (voltage[start+i] >= -55) {
startTP = (i-start)*time;
break;
}
}
for (k=(iterations*(num_cells-1));k<(iterations*num_cells);k++) { // Looking at last cell voltage only
if (voltage[start+k] >= -55) {
endTP = ((k-start) -(iterations*(num_cells-1)))*time;
break;
}
}
//distance = ;
vel[idx] =endTP-startTP;
}
__global__ void init_randomNums(curandState *state) {
int idx = blockDim.x*blockIdx.x + threadIdx.x ;
curand_init(1337, idx, 0, &state[idx]);
}
__global__ void make_randomNums(curandState *state, double* randArray) {
int idx = blockDim.x*blockIdx.x + threadIdx.x ;
double sigma = 0.15; //SD of variation of parameter
randArray[idx] = exp(sigma*curand_normal(&state[idx]));
}
__global__ void initialize_time_s2(double begin_time, double interval, double* time_s2) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
time_s2[idx] = begin_time+(interval*idx);
}
__global__ void percentage_excited(double* V_array, int iterations, int num_cells, double* percent, int variations) {
int idx = threadIdx.x;
int last_row = iterations-1;
int start = last_row + (idx*num_cells*iterations);
int num_excited = 0;
int i;
for (i=0;i<num_cells;i++) {
if(V_array[start+(i*iterations)] >= -55) {
num_excited++;
}
}
percent[idx] = ((double)num_excited/(double)num_cells);
}
int main( int argc, const char* argv[] )
{
int i, ii;
int time = 0;
FILE *fV = fopen("tt06 GPU Voltage", "w");
FILE *ft = fopen("tt06 GPU Time", "w");
FILE *output = fopen("tt06 GPU Sensitivity Analysis", "w");
FILE *s2output = fopen("tt06 GPU s2 Analysis", "w");
int index=0;
double* host_vars;
double* dev_vars;
double* dev_ion_currents;
double* dev_x_temp;
double* host_Vtemp;
double* dev_Vtemp;
double* V_array;
double* t_array;
double* dev_V_array;
double* dev_vel;
double* vel;
double* s2_times;
double* s2_times_dev;
double* percent_excited;
double* dev_percent_excited;
cudaEvent_t start,stop;
float elapsedTime;
curandState *rndState;
double* dev_randNums;
double* randNums;
int size;
double begin_time;
double end_time;
double test_interval;
int total_s2_times;
int s2_loc;
//Number of Parameters in the Model
int num_param = 19;
// Assume only running 1 simulation initially
int simulations = 1;
// Time Step Variables
double step = 0.002;
double tend = 10;
int iterations = tend/step;
double skip_time_value = 0.5; //ms
int skip_timept = skip_time_value/step; // skipping time points in voltage array & time array
int total_timepts = iterations/skip_timept;
// Number of Cells
int length = 10;
int width = 1;
int num_cells = length*width;
int cells_per_thread = 1; // for cell numbers > 500, one thread may need to work on more than one cell
//Stimulus Variables
double stimDur = 2.0;
double stimAmp = -80.0;
double stimInterval = 1000;
int tstim = stimInterval/step;
// Sensitivity Analysis?
//int num_changing_vars = 18;
int num_changing_vars = 0;
// S2 Analysis?
bool s2_analysis = false;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
if (s2_analysis) {
begin_time = 900;
end_time = 930;
test_interval = 10;
s2_loc = 5;
total_s2_times = (end_time-begin_time)/test_interval; //make sure not too many threads
simulations = total_s2_times;
s2_times = (double*)malloc(sizeof(double)*total_s2_times);
cudaMalloc( &s2_times_dev, sizeof(double)*total_s2_times);
initialize_time_s2<<<1,total_s2_times>>>(begin_time, test_interval, s2_times_dev);
cudaMemcpy(s2_times, s2_times_dev, total_s2_times*sizeof(double), cudaMemcpyDeviceToHost);
}
size = num_param*num_cells*simulations;
if (num_changing_vars != 0) {
simulations = 10;
cudaMalloc( &rndState, sizeof(curandState)*simulations*num_changing_vars);
cudaMalloc( &dev_randNums, sizeof(double)*simulations*num_changing_vars);
randNums = (double*)malloc(sizeof(double)*simulations*num_changing_vars);
init_randomNums<<<simulations,num_changing_vars>>>(rndState);
make_randomNums<<<simulations,num_changing_vars>>>(rndState, dev_randNums);
}
size = num_param*num_cells*simulations;
// vars array contains the voltage & state variables for all cells across all simulations
host_vars = (double *)malloc(sizeof(double)*size);
cudaMalloc( &dev_vars, sizeof(double)*size);
// results of the computeState kernel
cudaMalloc( &dev_ion_currents, sizeof(double)*num_cells*simulations);
cudaMalloc( &dev_x_temp, sizeof(double)*size);
// result of the computeVoltage kernel
host_Vtemp = (double*)malloc(sizeof(double)*num_cells*simulations);
cudaMalloc( &dev_Vtemp, sizeof(double)*num_cells*simulations);
V_array = (double*)malloc(sizeof(double)*(total_timepts*num_cells*simulations));
t_array = (double*)malloc(sizeof(double)*(total_timepts*simulations));
fprintf(fV, "V = [ \n");
// Initialize vars array with initial conditions
initialConditions<<<simulations,(num_cells/cells_per_thread)>>>(dev_vars,num_param,num_cells, cells_per_thread);
while (time<iterations) {
computeState<<<simulations,(num_cells/cells_per_thread)>>>(dev_vars, dev_ion_currents, num_cells, step, dev_randNums, simulations, dev_x_temp, num_changing_vars, cells_per_thread);
updateState<<<simulations,(num_cells/cells_per_thread)>>>(dev_vars, dev_x_temp, num_cells, cells_per_thread);
compute_voltage<<<simulations,(num_cells/cells_per_thread)>>>(dev_vars, dev_Vtemp, dev_ion_currents, step, dev_randNums, simulations, length, width, num_changing_vars, time, stimDur, stimAmp, tstim, s2_times_dev, cells_per_thread, s2_analysis, s2_loc);
update_voltage<<<simulations,(num_cells/cells_per_thread)>>>(dev_vars, dev_Vtemp, num_cells, cells_per_thread);
//update Voltage and time arrays and write data to file
cudaMemcpy(host_Vtemp, dev_Vtemp, num_cells*simulations*sizeof(double), cudaMemcpyDeviceToHost);
if (time%skip_timept == 0) {
for (i=0;i<num_cells*simulations;i++) {
V_array[ (i*(iterations/skip_timept)) +index] = host_Vtemp[i];
fprintf(fV, "%f\t ", host_Vtemp[i]);
}
fprintf(fV, "\n");
fprintf(ft, "%f \n", time*step);
for (i=0;i<simulations;i++) {
t_array[(index*simulations)+i] = time*step;
}
index++;
}
time++;
}
fprintf(fV, "]; \n");
/*
The Model Computations are Finished
This last section of code is only writing data to file(s) and cleaning up the memory
*/
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
free(host_vars);
cudaFree(dev_vars);
cudaFree(dev_ion_currents);
cudaFree(dev_x_temp);
free(host_Vtemp);
cudaFree(dev_Vtemp);
printf("Elapsed Time = %f s \n",elapsedTime/1000);
printf("\n");
printf("Calculating Simulation outputs...\n");
printf("\n");
if (num_changing_vars != 0) {
vel = (double*)malloc(sizeof(double)*simulations);
cudaMalloc( &dev_vel,(sizeof(double)*simulations));
cudaMalloc( &dev_V_array, sizeof(double)*(total_timepts*num_cells*simulations));
cudaMemcpy(dev_V_array, V_array, sizeof(double)*(total_timepts*num_cells*simulations), cudaMemcpyHostToDevice);
computeVelocity<<<1,simulations>>>(dev_V_array, total_timepts, num_cells, dev_vel, step*skip_timept, length, width);
cudaMemcpy(vel, dev_vel, simulations*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(randNums, dev_randNums, num_changing_vars*simulations*sizeof(double), cudaMemcpyDeviceToHost);
fprintf(output, "A = [ \n");
for (i=0;i<simulations;i++) {
for (ii=0;ii<num_changing_vars;ii++) {
fprintf(output, "%f\t", randNums[(i*num_changing_vars)+ii]);
}
fprintf(output, "\n");
}
fprintf(output, "]; \n");
fprintf(output, "\n");
fprintf(output, "Vel = [ \n");
for (i=0;i<simulations;i++) {
fprintf(output, "%f\n", vel[i]);
}
fprintf(output, "]; \n");
cudaFree(rndState);
cudaFree(dev_randNums);
free(randNums);
free(vel);
cudaFree(dev_vel);
}
if (s2_analysis) {
cudaMalloc( &dev_V_array, sizeof(double)*(total_timepts*num_cells*simulations));
cudaMemcpy(dev_V_array, V_array, sizeof(double)*(total_timepts*num_cells*simulations), cudaMemcpyHostToDevice);
percent_excited = (double*)malloc(sizeof(double)*total_s2_times);
cudaMalloc( &dev_percent_excited, sizeof(double)*total_s2_times);
percentage_excited<<<1,total_s2_times>>>(dev_V_array, total_timepts, num_cells, dev_percent_excited, simulations);
cudaMemcpy(percent_excited, dev_percent_excited, total_s2_times*sizeof(double), cudaMemcpyDeviceToHost);
fprintf(s2output, "A = [ \n");
for (i=0;i<simulations;i++) {
fprintf(s2output, "%f\n", s2_times[i]);
}
fprintf(s2output, "]; \n");
fprintf(s2output, "\n");
fprintf(s2output, "% = [ \n");
for (i=0;i<simulations;i++) {
fprintf(s2output, "%f\n", percent_excited[i]);
}
fprintf(s2output, "]; \n");
free(percent_excited);
cudaFree(dev_percent_excited);
}
free(V_array);
cudaFree(dev_V_array);
printf("Program is Done\n");
}
|
8130920eaceff34fbc5c8e2eb53a39b92cb3d227.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <linalg/coalesced_reduction.cuh>
#include <random/rng.cuh>
#include "reduce_hip.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct coalescedReductionInputs {
T tolerance;
int rows, cols;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const coalescedReductionInputs<T> &dims) {
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T>
void coalescedReductionLaunch(T *dots, const T *data, int cols, int rows,
hipStream_t stream, bool inplace = false) {
coalescedReduction(dots, data, cols, rows, (T)0, stream, inplace,
[] __device__(T in, int i) { return in * in; });
}
template <typename T>
class coalescedReductionTest
: public ::testing::TestWithParam<coalescedReductionInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<coalescedReductionInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.rows, cols = params.cols;
int len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(data, len);
raft::allocate(dots_exp, rows);
raft::allocate(dots_act, rows);
r.uniform(data, len, T(-1.0), T(1.0), stream);
naiveCoalescedReduction(dots_exp, data, cols, rows, stream);
// Perform reduction with default inplace = false first
coalescedReductionLaunch(dots_act, data, cols, rows, stream);
// Add to result with inplace = true next
coalescedReductionLaunch(dots_act, data, cols, rows, stream, true);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(dots_exp));
CUDA_CHECK(hipFree(dots_act));
}
protected:
coalescedReductionInputs<T> params;
T *data, *dots_exp, *dots_act;
};
const std::vector<coalescedReductionInputs<float>> inputsf = {
{0.000002f, 1024, 32, 1234ULL},
{0.000002f, 1024, 64, 1234ULL},
{0.000002f, 1024, 128, 1234ULL},
{0.000002f, 1024, 256, 1234ULL}};
const std::vector<coalescedReductionInputs<double>> inputsd = {
{0.000000001, 1024, 32, 1234ULL},
{0.000000001, 1024, 64, 1234ULL},
{0.000000001, 1024, 128, 1234ULL},
{0.000000001, 1024, 256, 1234ULL}};
typedef coalescedReductionTest<float> coalescedReductionTestF;
TEST_P(coalescedReductionTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(dots_exp, dots_act, params.rows,
raft::CompareApprox<float>(params.tolerance)));
}
typedef coalescedReductionTest<double> coalescedReductionTestD;
TEST_P(coalescedReductionTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(dots_exp, dots_act, params.rows,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(coalescedReductionTests, coalescedReductionTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(coalescedReductionTests, coalescedReductionTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
8130920eaceff34fbc5c8e2eb53a39b92cb3d227.cu
|
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <linalg/coalesced_reduction.cuh>
#include <random/rng.cuh>
#include "reduce.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct coalescedReductionInputs {
T tolerance;
int rows, cols;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const coalescedReductionInputs<T> &dims) {
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T>
void coalescedReductionLaunch(T *dots, const T *data, int cols, int rows,
cudaStream_t stream, bool inplace = false) {
coalescedReduction(dots, data, cols, rows, (T)0, stream, inplace,
[] __device__(T in, int i) { return in * in; });
}
template <typename T>
class coalescedReductionTest
: public ::testing::TestWithParam<coalescedReductionInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<coalescedReductionInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.rows, cols = params.cols;
int len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(data, len);
raft::allocate(dots_exp, rows);
raft::allocate(dots_act, rows);
r.uniform(data, len, T(-1.0), T(1.0), stream);
naiveCoalescedReduction(dots_exp, data, cols, rows, stream);
// Perform reduction with default inplace = false first
coalescedReductionLaunch(dots_act, data, cols, rows, stream);
// Add to result with inplace = true next
coalescedReductionLaunch(dots_act, data, cols, rows, stream, true);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(dots_exp));
CUDA_CHECK(cudaFree(dots_act));
}
protected:
coalescedReductionInputs<T> params;
T *data, *dots_exp, *dots_act;
};
const std::vector<coalescedReductionInputs<float>> inputsf = {
{0.000002f, 1024, 32, 1234ULL},
{0.000002f, 1024, 64, 1234ULL},
{0.000002f, 1024, 128, 1234ULL},
{0.000002f, 1024, 256, 1234ULL}};
const std::vector<coalescedReductionInputs<double>> inputsd = {
{0.000000001, 1024, 32, 1234ULL},
{0.000000001, 1024, 64, 1234ULL},
{0.000000001, 1024, 128, 1234ULL},
{0.000000001, 1024, 256, 1234ULL}};
typedef coalescedReductionTest<float> coalescedReductionTestF;
TEST_P(coalescedReductionTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(dots_exp, dots_act, params.rows,
raft::CompareApprox<float>(params.tolerance)));
}
typedef coalescedReductionTest<double> coalescedReductionTestD;
TEST_P(coalescedReductionTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(dots_exp, dots_act, params.rows,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(coalescedReductionTests, coalescedReductionTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(coalescedReductionTests, coalescedReductionTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
beecf044abe023d30aace1f3e4fd5885d8a364f7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <omp.h>
#include <stdio.h>
#include <math.h>
#include "jacobi_gpu.h"
#define d_u(i,j) d_U[(i)*N + (j)]
#define d_u_old(i,j) d_U_old[(i)*N + (j)]
#define h_u(i,j) h_U[(i)*N + (j)]
#define h_u_old(i,j) h_U_old[(i)*N + (j)]
#define f(i,j) F[(i)*N + (j)]
// Declarations
void init_F(int N, int *F);
void print_matrix(int m, int n, double *matrix);
void print_F(int m, int n, int *mat);
int main(int argc, char *argv[]) {
int max_it, N, *h_fu, type;
double *h_U, *h_U_old;
int i, j, k = 0;
double h = 1.0 / 4.0;
N = atoi(argv[1]);
max_it = atoi(argv[2]);
type = atoi(argv[3]);
double delta = 2/((double)N - 1.0), delta_sq = delta * delta;
//printf("%d", N);
//printf("%d", max_it);
//U = (double *)malloc(N * N * sizeof(double));
//U_old = (double *)malloc(N * N * sizeof(double));
//fu = (int *)malloc(N * N * sizeof(int));
double size_double = N * N * sizeof(double);
int size_int = N * N * sizeof(int);
hipHostMalloc((void **)&h_U, size_double);
hipHostMalloc((void **)&h_U_old, size_double);
hipHostMalloc((void **)&h_fu, size_int);
init_F(N, h_fu);
#pragma omp parallel firstprivate(h_U,h_U_old) private(i,j) \
shared(N)
{
// Initialize U and U_old
#pragma omp for
for (i=0; i<N; i++) {
h_u_old(i,0) = 20.0;
h_u_old(0,i) = 20.0;
h_u_old(i,N-1) = 20.0;
h_u(i,0) = 20.0;
h_u(0,i) = 20.0;
h_u(i,N-1) = 20.0;
}
#pragma omp for
for (i=1; i<N; i++) {
for (j=1; j<N-1; j++) {
h_u_old(i,j) = 0.0;
h_u(i,j) = 0.0;
}
}
} // end omp parallel
//print_matrix(N,N,h_U_old);
if (type == 1){
double *d_U, *d_U_old, *tmp;
int *d_fu;
hipMalloc((void **)&d_U, size_double);
hipMalloc((void **)&d_U_old, size_double);
hipMalloc((void **)&d_fu, size_int);
dim3 dimBlock(1,1,1);
dim3 dimGrid(1,1,1);
hipMemcpy(d_U, h_U, size_double, hipMemcpyHostToDevice);
hipMemcpy(d_U_old, h_U_old, size_double, hipMemcpyHostToDevice);
hipMemcpy(d_fu, h_fu, size_int, hipMemcpyHostToDevice);
while (k < max_it) {
hipLaunchKernelGGL(( jacobi_1), dim3(dimGrid),dim3(dimBlock), 0, 0, N, d_U, d_U_old, d_fu, h, delta_sq);
hipDeviceSynchronize();
//Swap Pointers
tmp = d_U;
d_U = d_U_old;
d_U_old = tmp;
k++;
}
hipMemcpy(h_U, d_U, size_double, hipMemcpyDeviceToHost);
hipMemcpy(h_U_old, d_U_old, size_double, hipMemcpyDeviceToHost);
hipMemcpy(h_fu, d_fu, size_int, hipMemcpyDeviceToHost);
//print_matrix(N,N,h_U);
hipHostFree(h_U);
hipHostFree(h_U_old);
hipHostFree(h_fu);
hipFree(d_U);
hipFree(d_U_old);
hipFree(d_fu);
} else if (type == 2){
double *d_U, *d_U_old, *tmp;
int *d_fu;
hipMalloc((void **)&d_U, size_double);
hipMalloc((void **)&d_U_old, size_double);
hipMalloc((void **)&d_fu, size_int);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid(ceil((double)N/dimBlock.x), ceil((double)N/dimBlock.y), 1); // Num blocks
hipMemcpy(d_U, h_U, size_double, hipMemcpyHostToDevice);
hipMemcpy(d_U_old, h_U_old, size_double, hipMemcpyHostToDevice);
hipMemcpy(d_fu, h_fu, size_int, hipMemcpyHostToDevice);
while (k < max_it) {
hipLaunchKernelGGL(( jacobi_2), dim3(dimGrid),dim3(dimBlock), 0, 0, N, d_U, d_U_old, d_fu, h, delta_sq);
hipDeviceSynchronize();
//Swap Pointers
tmp = d_U;
d_U = d_U_old;
d_U_old = tmp;
k++;
}
hipMemcpy(h_U, d_U, size_double, hipMemcpyDeviceToHost);
hipMemcpy(h_U_old, d_U_old, size_double, hipMemcpyDeviceToHost);
hipMemcpy(h_fu, d_fu, size_int, hipMemcpyDeviceToHost);
//print_matrix(N,N,h_U);
hipHostFree(h_U);
hipHostFree(h_U_old);
hipHostFree(h_fu);
hipFree(d_U);
hipFree(d_U_old);
hipFree(d_fu);
} else if (type == 3 && N % 2 == 0){ //N has to be an even number
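// Note (inferred from the kernel arguments below): the N x N domain is split
// row-wise into two N/2 x N halves, one per GPU. Peer access is enabled so that
// jacobi_3_0 can read device 1's U_old half and jacobi_3_1 can read device 0's,
// which provides the halo row at the split between the two halves.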
//int access0from1, access1from0, ; //access gpu 0 from gpu 1
int flags, gpu_0 = 0, gpu_1 = 1;
// Enable peer access
//hipDeviceCanAccessPeer(&access0from1, gpu_1, gpu_0);
//hipDeviceCanAccessPeer(&access1from0, gpu_0, gpu_1);
hipSetDevice(gpu_0);
hipDeviceEnablePeerAccess(gpu_1,flags=0);
hipSetDevice(gpu_1);
hipDeviceEnablePeerAccess(gpu_0,flags=0);
// Do operations
double *d0_U, *d0_U_old, *d1_U, *d1_U_old, *tmp0, *tmp1;
int *d0_fu, *d1_fu;
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid(ceil((double)N/dimBlock.x), ceil(((double)N/2.0)/dimBlock.y), 1); // Num blocks
// Device 0
hipSetDevice(gpu_0);
hipMalloc((void **)&d0_U, size_double/2.0);
hipMalloc((void **)&d0_U_old, size_double/2.0);
hipMalloc((void **)&d0_fu, size_int/2);
hipMemcpy(d0_U, h_U, size_double/2.0, hipMemcpyHostToDevice);
hipMemcpy(d0_U_old, h_U_old, size_double/2.0, hipMemcpyHostToDevice);
hipMemcpy(d0_fu, h_fu, size_int/2, hipMemcpyHostToDevice);
// Device 1
hipSetDevice(gpu_1);
hipMalloc((void **)&d1_U, size_double/2.0);
hipMalloc((void **)&d1_U_old, size_double/2.0);
hipMalloc((void **)&d1_fu, size_int/2);
hipMemcpy(d1_U, h_U + (N*N)/2, size_double/2.0, hipMemcpyHostToDevice);
hipMemcpy(d1_U_old, h_U_old + (N*N)/2, size_double/2.0, hipMemcpyHostToDevice);
hipMemcpy(d1_fu, h_fu + (N*N)/2, size_int/2, hipMemcpyHostToDevice);
while (k < max_it) {
hipSetDevice(gpu_0);
hipLaunchKernelGGL(( jacobi_3_0), dim3(dimGrid),dim3(dimBlock), 0, 0, N, d0_U, d0_U_old, d1_U_old, d0_fu, h, delta_sq);
hipDeviceSynchronize();
hipSetDevice(gpu_1);
hipLaunchKernelGGL(( jacobi_3_1), dim3(dimGrid),dim3(dimBlock), 0, 0, N, d1_U, d1_U_old, d0_U_old, d1_fu, h, delta_sq);
hipDeviceSynchronize();
//Swap Pointers device 0
tmp0 = d0_U;
d0_U = d0_U_old;
d0_U_old = tmp0;
//Swap Pointers device 1
tmp1 = d1_U;
d1_U = d1_U_old;
d1_U_old = tmp1;
k++;
}
hipMemcpy(h_U, d0_U, size_double/2.0, hipMemcpyDeviceToHost);
hipMemcpy(h_U_old, d0_U_old, size_double/2.0, hipMemcpyDeviceToHost);
hipMemcpy(h_fu, d0_fu, size_int/2, hipMemcpyDeviceToHost);
hipMemcpy(h_U + (N*N)/2, d1_U, size_double/2.0, hipMemcpyDeviceToHost);
hipMemcpy(h_U_old + (N*N)/2, d1_U_old, size_double/2.0, hipMemcpyDeviceToHost);
hipMemcpy(h_fu + (N*N)/2, d1_fu, size_int/2, hipMemcpyDeviceToHost);
//print_matrix(N,N,h_U);
hipHostFree(h_U);
hipHostFree(h_U_old);
hipHostFree(h_fu);
hipFree(d0_U);
hipFree(d0_U_old);
hipFree(d0_fu);
hipFree(d1_U);
hipFree(d1_U_old);
hipFree(d1_fu);
}
return(0); //end of main
}
void init_F(int N, int *F) {
double delta = 2/((double)N - 1.0);
double fx, fy;
for (int i=0;i<N;i++) {
for (int j=0;j<N;j++) {
fx = (double)j*delta-1.0;
fy = -1.0*((double)i*delta-1.0);
if (0<=fx && fx<=(1.0/3.0) && (-2.0/3.0)<=fy && fy<=(-1.0/3.0)) {
f(i,j) = 200;
} else {
f(i,j) = 0;
}
}
}
}
void print_matrix(int m, int n, double *mat) {
int x = 0;
int y = 0;
for(x = 0 ; x < m ; x++) {
printf(" ");
for(y = 0 ; y < n ; y++){
printf("%f ", mat[x*m+y]);
}
printf(";\n");
}
}
void print_F(int m, int n, int *mat) {
int x = 0;
int y = 0;
for(x = 0 ; x < m ; x++) {
printf(" ");
for(y = 0 ; y < n ; y++){
printf("%d ", mat[x*m+y]);
}
printf("\n");
}
}
|
beecf044abe023d30aace1f3e4fd5885d8a364f7.cu
|
#include <stdlib.h>
#include <omp.h>
#include <stdio.h>
#include <math.h>
#include "jacobi_gpu.h"
#define d_u(i,j) d_U[(i)*N + (j)]
#define d_u_old(i,j) d_U_old[(i)*N + (j)]
#define h_u(i,j) h_U[(i)*N + (j)]
#define h_u_old(i,j) h_U_old[(i)*N + (j)]
#define f(i,j) F[(i)*N + (j)]
// Declarations
void init_F(int N, int *F);
void print_matrix(int m, int n, double *matrix);
void print_F(int m, int n, int *mat);
int main(int argc, char *argv[]) {
int max_it, N, *h_fu, type;
double *h_U, *h_U_old;
int i, j, k = 0;
double h = 1.0 / 4.0;
N = atoi(argv[1]);
max_it = atoi(argv[2]);
type = atoi(argv[3]);
double delta = 2/((double)N - 1.0), delta_sq = delta * delta;
//printf("%d", N);
//printf("%d", max_it);
//U = (double *)malloc(N * N * sizeof(double));
//U_old = (double *)malloc(N * N * sizeof(double));
//fu = (int *)malloc(N * N * sizeof(int));
double size_double = N * N * sizeof(double);
int size_int = N * N * sizeof(int);
cudaMallocHost((void **)&h_U, size_double);
cudaMallocHost((void **)&h_U_old, size_double);
cudaMallocHost((void **)&h_fu, size_int);
init_F(N, h_fu);
#pragma omp parallel firstprivate(h_U,h_U_old) private(i,j) \
shared(N)
{
// Initialize U and U_old
#pragma omp for
for (i=0; i<N; i++) {
h_u_old(i,0) = 20.0;
h_u_old(0,i) = 20.0;
h_u_old(i,N-1) = 20.0;
h_u(i,0) = 20.0;
h_u(0,i) = 20.0;
h_u(i,N-1) = 20.0;
}
#pragma omp for
for (i=1; i<N; i++) {
for (j=1; j<N-1; j++) {
h_u_old(i,j) = 0.0;
h_u(i,j) = 0.0;
}
}
} // end omp parallel
//print_matrix(N,N,h_U_old);
if (type == 1){
double *d_U, *d_U_old, *tmp;
int *d_fu;
cudaMalloc((void **)&d_U, size_double);
cudaMalloc((void **)&d_U_old, size_double);
cudaMalloc((void **)&d_fu, size_int);
dim3 dimBlock(1,1,1);
dim3 dimGrid(1,1,1);
cudaMemcpy(d_U, h_U, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(d_U_old, h_U_old, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(d_fu, h_fu, size_int, cudaMemcpyHostToDevice);
while (k < max_it) {
jacobi_1<<<dimGrid,dimBlock>>>(N, d_U, d_U_old, d_fu, h, delta_sq);
cudaDeviceSynchronize();
//Swap Pointers
tmp = d_U;
d_U = d_U_old;
d_U_old = tmp;
k++;
}
cudaMemcpy(h_U, d_U, size_double, cudaMemcpyDeviceToHost);
cudaMemcpy(h_U_old, d_U_old, size_double, cudaMemcpyDeviceToHost);
cudaMemcpy(h_fu, d_fu, size_int, cudaMemcpyDeviceToHost);
//print_matrix(N,N,h_U);
cudaFreeHost(h_U);
cudaFreeHost(h_U_old);
cudaFreeHost(h_fu);
cudaFree(d_U);
cudaFree(d_U_old);
cudaFree(d_fu);
} else if (type == 2){
double *d_U, *d_U_old, *tmp;
int *d_fu;
cudaMalloc((void **)&d_U, size_double);
cudaMalloc((void **)&d_U_old, size_double);
cudaMalloc((void **)&d_fu, size_int);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid(ceil((double)N/dimBlock.x), ceil((double)N/dimBlock.y), 1); // Num blocks
cudaMemcpy(d_U, h_U, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(d_U_old, h_U_old, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(d_fu, h_fu, size_int, cudaMemcpyHostToDevice);
while (k < max_it) {
jacobi_2<<<dimGrid,dimBlock>>>(N, d_U, d_U_old, d_fu, h, delta_sq);
cudaDeviceSynchronize();
//Swap Pointers
tmp = d_U;
d_U = d_U_old;
d_U_old = tmp;
k++;
}
cudaMemcpy(h_U, d_U, size_double, cudaMemcpyDeviceToHost);
cudaMemcpy(h_U_old, d_U_old, size_double, cudaMemcpyDeviceToHost);
cudaMemcpy(h_fu, d_fu, size_int, cudaMemcpyDeviceToHost);
//print_matrix(N,N,h_U);
cudaFreeHost(h_U);
cudaFreeHost(h_U_old);
cudaFreeHost(h_fu);
cudaFree(d_U);
cudaFree(d_U_old);
cudaFree(d_fu);
} else if (type == 3 && N % 2 == 0){ //N has to be an even number
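// Note (inferred from the kernel arguments below): the N x N domain is split
// row-wise into two N/2 x N halves, one per GPU. Peer access is enabled so that
// jacobi_3_0 can read device 1's U_old half and jacobi_3_1 can read device 0's,
// which provides the halo row at the split between the two halves.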
//int access0from1, access1from0, ; //access gpu 0 from gpu 1
int flags, gpu_0 = 0, gpu_1 = 1;
// Enable peer access
//cudaDeviceCanAccessPeer(&access0from1, gpu_1, gpu_0);
//cudaDeviceCanAccessPeer(&access1from0, gpu_0, gpu_1);
cudaSetDevice(gpu_0);
cudaDeviceEnablePeerAccess(gpu_1,flags=0);
cudaSetDevice(gpu_1);
cudaDeviceEnablePeerAccess(gpu_0,flags=0);
// Do operations
double *d0_U, *d0_U_old, *d1_U, *d1_U_old, *tmp0, *tmp1;
int *d0_fu, *d1_fu;
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid(ceil((double)N/dimBlock.x), ceil(((double)N/2.0)/dimBlock.y), 1); // Num blocks
// Device 0
cudaSetDevice(gpu_0);
cudaMalloc((void **)&d0_U, size_double/2.0);
cudaMalloc((void **)&d0_U_old, size_double/2.0);
cudaMalloc((void **)&d0_fu, size_int/2);
cudaMemcpy(d0_U, h_U, size_double/2.0, cudaMemcpyHostToDevice);
cudaMemcpy(d0_U_old, h_U_old, size_double/2.0, cudaMemcpyHostToDevice);
cudaMemcpy(d0_fu, h_fu, size_int/2, cudaMemcpyHostToDevice);
// Device 1
cudaSetDevice(gpu_1);
cudaMalloc((void **)&d1_U, size_double/2.0);
cudaMalloc((void **)&d1_U_old, size_double/2.0);
cudaMalloc((void **)&d1_fu, size_int/2);
cudaMemcpy(d1_U, h_U + (N*N)/2, size_double/2.0, cudaMemcpyHostToDevice);
cudaMemcpy(d1_U_old, h_U_old + (N*N)/2, size_double/2.0, cudaMemcpyHostToDevice);
cudaMemcpy(d1_fu, h_fu + (N*N)/2, size_int/2, cudaMemcpyHostToDevice);
while (k < max_it) {
cudaSetDevice(gpu_0);
jacobi_3_0<<<dimGrid,dimBlock>>>(N, d0_U, d0_U_old, d1_U_old, d0_fu, h, delta_sq);
cudaDeviceSynchronize();
cudaSetDevice(gpu_1);
jacobi_3_1<<<dimGrid,dimBlock>>>(N, d1_U, d1_U_old, d0_U_old, d1_fu, h, delta_sq);
cudaDeviceSynchronize();
//Swap Pointers device 0
tmp0 = d0_U;
d0_U = d0_U_old;
d0_U_old = tmp0;
//Swap Pointers device 1
tmp1 = d1_U;
d1_U = d1_U_old;
d1_U_old = tmp1;
k++;
}
cudaMemcpy(h_U, d0_U, size_double/2.0, cudaMemcpyDeviceToHost);
cudaMemcpy(h_U_old, d0_U_old, size_double/2.0, cudaMemcpyDeviceToHost);
cudaMemcpy(h_fu, d0_fu, size_int/2, cudaMemcpyDeviceToHost);
cudaMemcpy(h_U + (N*N)/2, d1_U, size_double/2.0, cudaMemcpyDeviceToHost);
cudaMemcpy(h_U_old + (N*N)/2, d1_U_old, size_double/2.0, cudaMemcpyDeviceToHost);
cudaMemcpy(h_fu + (N*N)/2, d1_fu, size_int/2, cudaMemcpyDeviceToHost);
//print_matrix(N,N,h_U);
cudaFreeHost(h_U);
cudaFreeHost(h_U_old);
cudaFreeHost(h_fu);
cudaFree(d0_U);
cudaFree(d0_U_old);
cudaFree(d0_fu);
cudaFree(d1_U);
cudaFree(d1_U_old);
cudaFree(d1_fu);
}
return(0); //end of main
}
void init_F(int N, int *F) {
double delta = 2/((double)N - 1.0);
double fx, fy;
for (int i=0;i<N;i++) {
for (int j=0;j<N;j++) {
fx = (double)j*delta-1.0;
fy = -1.0*((double)i*delta-1.0);
if (0<=fx && fx<=(1.0/3.0) && (-2.0/3.0)<=fy && fy<=(-1.0/3.0)) {
f(i,j) = 200;
} else {
f(i,j) = 0;
}
}
}
}
void print_matrix(int m, int n, double *mat) {
int x = 0;
int y = 0;
for(x = 0 ; x < m ; x++) {
printf(" ");
for(y = 0 ; y < n ; y++){
printf("%f ", mat[x*m+y]);
}
printf(";\n");
}
}
void print_F(int m, int n, int *mat) {
int x = 0;
int y = 0;
for(x = 0 ; x < m ; x++) {
printf(" ");
for(y = 0 ; y < n ; y++){
printf("%d ", mat[x*m+y]);
}
printf("\n");
}
}
|
8427ab772c2146d9bbe478def6e5f24ee58a23ee.hip
|
// !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#define BLOCK_SIZE 16
#define CWIDTH 32
#define CHEIGHT 32
#define FWIDTH 3
#define FHEIGHT 3
#define CNUMBER 32
//__shared__ float filteroutshared[32][32];
__global__ void convolute_kernel(float *IC, float*F, float *OC) {
//printf("hello");
/* int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Accumulate row i of A and column j of B
int i = by * blockDim.y + ty;
int j = bx * blockDim.x + tx;
float accu = 0.0;
for(int h=0; h<k; h++){
accu = accu + A[ i * k + h ] * B[ h * k + j ];
}
// Write the block sub-matrix to device memory;
// each thread writes one element
C[ i * k + j ] = accu;
*/
/* int i,j;
float c_acc = 0;
__shared__ float shared_A [BLOCK_SIZE][BLOCK_SIZE];
__shared__ float shared_B[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int tile = 0; tile < gridDim.x; tile++) {
j = tile * BLOCK_SIZE + threadIdx.x;
i = tile * BLOCK_SIZE + threadIdx.y;
shared_A[threadIdx.y][threadIdx.x] = A[row * k + j];
shared_B[threadIdx.y][threadIdx.x] = B[i * k + col];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; k++) {
c_acc += shared_A[threadIdx.y][k] * shared_B[k][threadIdx.x];
}
__syncthreads();
}
C[row * k + col] = c_acc;
*/
/*
int idx = threadIdx.x;
int idy = threadIdx.y;
float c_acc[64];
for(int i = 0; i < 64; i++)
c_acc[i] = 0;
for(int j = 0; j < k; j++)
{
for(int i = 0; i < k/4; i++)
{
c_acc[i] += A[(idy*8 + idx/4) + j*256] * B[idx%4 + 4*i +256*j];
}
}
for(int i = 0; i < k/4; i++)
{
C[(idy*8 + idx/4)*256 + idx%4 + 4*i] = c_acc[i];
}
*/
/*
int kCenterX = FWIDTH / 2;
int kCenterY = FWIDTH / 2;
int a[2];
a[0] = 0;
a[1] = 0;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if(i < CWIDTH && j < CWIDTH)
{
for(int k = 0; k < CNUMBER; k++)
{
for (int m = 0; m < FWIDTH; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
// index of input, used for checking boundary
int ii = i + (m - kCenterY);
int jj = j + (n - kCenterX);
// ignore input samples which are out of bound
if (ii >= 0 && ii < CWIDTH && jj >= 0 && jj < CWIDTH)
a[k] += IC[k * CWIDTH * CWIDTH + ii * CWIDTH + jj] * F[k * FWIDTH * FWIDTH + m * FWIDTH + n];
}
}
OC[k + CWIDTH * CWIDTH + i * CWIDTH + j] = a[k];
}
}
*/
/*
int idx = threadIdx.x;
int idy = threadIdx.y;
int kCenterX = FWIDTH / 2;
int kCenterY = FWIDTH / 2;
int octemp[3][64];
__shared__ int filtemp[CNUMBER * FWIDTH * FWIDTH];
int a,b;
for(int j = 0; j < CNUMBER; j++)
{
for(int i = 0; i < 64; i++)
octemp[j][i] = 0;
}
if(idx < CNUMBER * FWIDTH * FWIDTH && idy == 0)
filtemp[idx] = F[idx];
__syncthreads();
for(int i = 0; i < CWIDTH/4; i++)
{
a = (idy*8 + idx/4);
b = idx%4 + 4*i;
for (int m = 0; m < FWIDTH; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
// index of input, used for checking boundary
int ii = a + (m - kCenterY);
int jj = b + (n - kCenterX);
for(int k = 0; k < CNUMBER; k++)
{
// ignore input samples which are out of bound
if (ii >= 0 && ii < CWIDTH && jj >= 0 && jj < CWIDTH)
octemp[k][i] += IC[k * CWIDTH * CWIDTH + ii * CWIDTH + jj] * filtemp[k * FWIDTH * FWIDTH + m * FWIDTH + n];
}
}
}
}
for(int j = 0; j < CNUMBER; j++)
{
for(int i = 0; i < CWIDTH/4; i++)
{
OC[(j*CWIDTH*CWIDTH) + ((idy*8 + idx/4)*CWIDTH) + idx%4 + 4*i] = octemp[j][i];
}
}
*/
int idx = threadIdx.x;
int idy = threadIdx.y;
//printf("hello\n");
int kCenterX = FWIDTH / 2;
int kCenterY = FHEIGHT / 2;
int filtersperthread = CNUMBER / BLOCK_SIZE;
int remaining = CNUMBER % BLOCK_SIZE;
//printf("fil per t %d",filtersperthread);
float filtertemp[3][FHEIGHT][FWIDTH];
float outputtemp[CHEIGHT / BLOCK_SIZE + 1][CWIDTH];
// Zero the per-thread accumulator before the first pass; it is filled with +=
for(int i = 0; i < CHEIGHT / BLOCK_SIZE + 1; i++)
for(int j = 0; j < CWIDTH; j++)
outputtemp[i][j] = 0;
//printf("hello");
for(int k = 0; k < filtersperthread; k++)
{
for(int i = 0; i < FHEIGHT; i++)
{
for(int j = 0; j < FWIDTH; j++)
{
filtertemp[k][i][j] = F[k * FHEIGHT * FWIDTH + i * FWIDTH + j];
//printf("%.1f %.1f\n",filtertemp[k][i][j],F[k * FHEIGHT * FWIDTH * BLOCK_SIZE + idx%BLOCK_SIZE * FHEIGHT * FWIDTH + i * FWIDTH + j]);
}
}
}
/*
if(idx == 0 && idy == 0)
{
for(int i = 0; i < FHEIGHT; i++)
{
for(int j = 0; j < FWIDTH; j++)
{
printf("%.1f",filtertemp[0][i][j]);
}
printf("\n");
}
}
*/
if(remaining > 0 && idx < remaining)
{
int k = filtersperthread;
for(int i = 0; i < FHEIGHT; i++)
{
for(int j = 0; j < FWIDTH; j++)
{
filtertemp[k][i][j] = F[k * FHEIGHT * FWIDTH + i * FWIDTH + j];
}
}
}
//idx = column number, idy = row number
for(int k = 0; k < filtersperthread; k++)
{
for(int i = (idy * CHEIGHT / BLOCK_SIZE) - kCenterY; i < ((idy + 1) * CHEIGHT / BLOCK_SIZE) + kCenterY; i++)
{
for(int j = 0; j < CWIDTH; j++)
{
if(i >= 0 && i < CHEIGHT)
{
float input_element = IC[(k * CHEIGHT * CWIDTH * BLOCK_SIZE) + ((idx%BLOCK_SIZE) * CHEIGHT * CWIDTH) + (i * CWIDTH) + j];
#pragma unroll
for (int m = 0; m < FHEIGHT; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
int ii = i + (m - kCenterY);
int jj = j + (n - kCenterX);
if (ii >= 0 && ii < CHEIGHT && jj >= 0 && jj < CWIDTH && ii >= (idy * CHEIGHT / BLOCK_SIZE) && ii < ((idy + 1) * CHEIGHT / BLOCK_SIZE))
{
//printf("%.1f\n", filtertemp[k][m][n] * IC[idx * CWIDTH * CWIDTH + i * CWIDTH + j]);
outputtemp[ii - (idy * CHEIGHT / BLOCK_SIZE)][jj] += filtertemp[k][m][n] * input_element;
}
//__syncthreads();
}
}
}
}
}
for(int i = (idy * CHEIGHT / BLOCK_SIZE); i < ((idy + 1) * CHEIGHT / BLOCK_SIZE); i++)
{
for(int j = 0; j < CWIDTH; j++)
{
OC[(k * CHEIGHT * CWIDTH * BLOCK_SIZE) + ((idx%BLOCK_SIZE) * CHEIGHT * CWIDTH) + (i * CWIDTH) + j] = outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j];
outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j] = 0;
}
}
}
if(remaining > 0 && idx < remaining)
{
int k = filtersperthread;
for(int i = (idy * CHEIGHT / BLOCK_SIZE) - kCenterY; i < ((idy + 1) * CHEIGHT / BLOCK_SIZE) + kCenterY; i++)
{
for(int j = 0; j < CWIDTH; j++)
{
if(i >= 0 && i < CHEIGHT)
{
float input_element = IC[k * CHEIGHT * CWIDTH * BLOCK_SIZE + idx%BLOCK_SIZE * CHEIGHT * CWIDTH + i * CWIDTH + j];
#pragma unroll
for (int m = 0; m < FHEIGHT; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
int ii = i + (m - kCenterY);
int jj = j + (n - kCenterX);
//if(idx == 0 && idy == 0)
//printf(" %d %d %d %d ", m, n, ii, jj);
if (ii >= 0 && ii < CHEIGHT && jj >= 0 && jj < CWIDTH && ii >= (idy * CHEIGHT / BLOCK_SIZE) && ii < ((idy + 1) * CHEIGHT / BLOCK_SIZE))
{
//printf("%f", filtertemp[k][m][n] * IC[idx * CWIDTH * CWIDTH + i * CWIDTH + j]);
outputtemp[ii - (idy * CHEIGHT / BLOCK_SIZE)][jj] += filtertemp[k][m][n] * input_element;
// if(idx == 0 && idy == 0)
//printf(" yo %d %d \n", m, n);
}
//__syncthreads();
}
}
}
}
}
for(int i = (idy * CHEIGHT / BLOCK_SIZE); i < ((idy + 1) * CHEIGHT / BLOCK_SIZE); i++)
{
for(int j = 0; j < CWIDTH; j++)
{
OC[k * CHEIGHT * CWIDTH * BLOCK_SIZE + (idx % BLOCK_SIZE) * CHEIGHT * CWIDTH + i * CWIDTH + j] = outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j];
outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j] = 0;
}
}
}
/*
for(int i = 0; i < 32; i++)
filteroutshared[idy][idx] = 0;
for(int i = 0; i < 32; i++)
filteroutshared[idy][(idx + i) % 32] += outputtemp[0][(idx + i) % 32];
__syncthreads();
if(idx == 0 && idy == 0)
{
for(int i = 0; i < 32; i++)
{
for(int j = 0; j < 32; j++)
printf("%.1f ", filteroutshared[i][j]);
printf("\n");
}
}
*/
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
void cudaconvolute(float* IC, float* F, float* OC, float*** OC_cpu)
{
size_t totalBytes_channel = sizeof(float) * CNUMBER * CHEIGHT * CWIDTH;
size_t totalBytes_filter = sizeof(float) * CNUMBER * FHEIGHT * FWIDTH;
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 blocks(N/BLOCK_SIZE,N/BLOCK_SIZE);
float* device_IC;
float* device_F;
float* device_OC;
hipMalloc(&device_IC, totalBytes_channel);
hipMalloc(&device_F, totalBytes_filter);
hipMalloc(&device_OC, totalBytes_channel);
hipMemcpy(device_IC, IC, totalBytes_channel, hipMemcpyHostToDevice);
hipMemcpy(device_F, F, totalBytes_filter, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
double startKernelTime = CycleTimer::currentSeconds();
hipEventRecord(start);
hipLaunchKernelGGL(( convolute_kernel), dim3(1), dim3(threadsPerBlock), 0, 0, device_IC, device_F, device_OC);
hipEventRecord(stop);
hipDeviceSynchronize();
double endKernelTime = CycleTimer::currentSeconds();
hipMemcpy(OC, device_OC, totalBytes_channel, hipMemcpyDeviceToHost);
double kernelDuration = endKernelTime - startKernelTime;
printf("KernelDuration: %.3f ms\n", 1000.f * kernelDuration);
float m = 0;
hipEventElapsedTime(&m, start, stop);
printf("CUDA Elapsed Time %f ms\n", m);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Bandwidth = %f GFLOPS/s\n", (2 * FWIDTH * FHEIGHT * CWIDTH * CHEIGHT * CNUMBER) / (m * 1000000000));
// for(int k = 0; k < CNUMBER; k++) {
// for (int i = 0;i < CHEIGHT; i++){
// for (int j = 0; j < CWIDTH; j++) {
bool equal = true;
for(int k = 0; k < CNUMBER; k++) {
for (int i = 0;i < CHEIGHT; i++){
for (int j = 0; j < CWIDTH; j++) {
//printf("%0.1f ",OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j]);
if(OC_cpu[k][i][j] != OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j])
{
equal = false;
//printf("%0.1f ",OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j]);
//printf("%d %d %d %.1f != %.1f\n", k , i, j, OC_cpu[k][i][j], OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j]);
//break;
}
//printf("%d",OC[k * CNUMBER * CWIDTH + i * CWIDTH + j]);
}
//printf("\n");
}
//printf("\n");
}
if(equal)
printf("EQUAL\n");
else
printf("NOT EQUAL\n");
hipFree(device_IC);
hipFree(device_F);
hipFree(device_OC);
}
void
printCudaInfo() {
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
printf(" Shared memory per block: %d bytes\n", deviceProps.sharedMemPerBlock);
}
printf("---------------------------------------------------------\n");
}
//hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
//hipEventRecord(start);
//hipEventRecord(stop);
//float m = 0;
//hipEventElapsedTime(&m, start, stop);
//hipEventDestroy(start);
//hipEventDestroy(stop);
|
8427ab772c2146d9bbe478def6e5f24ee58a23ee.cu
|
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#define BLOCK_SIZE 16
#define CWIDTH 32
#define CHEIGHT 32
#define FWIDTH 3
#define FHEIGHT 3
#define CNUMBER 32
//__shared__ float filteroutshared[32][32];
__global__ void convolute_kernel(float *IC, float*F, float *OC) {
//printf("hello");
/* int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Accumulate row i of A and column j of B
int i = by * blockDim.y + ty;
int j = bx * blockDim.x + tx;
float accu = 0.0;
for(int h=0; h<k; h++){
accu = accu + A[ i * k + h ] * B[ h * k + j ];
}
// Write the block sub-matrix to device memory;
// each thread writes one element
C[ i * k + j ] = accu;
*/
/* int i,j;
float c_acc = 0;
__shared__ float shared_A [BLOCK_SIZE][BLOCK_SIZE];
__shared__ float shared_B[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int tile = 0; tile < gridDim.x; tile++) {
j = tile * BLOCK_SIZE + threadIdx.x;
i = tile * BLOCK_SIZE + threadIdx.y;
shared_A[threadIdx.y][threadIdx.x] = A[row * k + j];
shared_B[threadIdx.y][threadIdx.x] = B[i * k + col];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; k++) {
c_acc += shared_A[threadIdx.y][k] * shared_B[k][threadIdx.x];
}
__syncthreads();
}
C[row * k + col] = c_acc;
*/
/*
int idx = threadIdx.x;
int idy = threadIdx.y;
float c_acc[64];
for(int i = 0; i < 64; i++)
c_acc[i] = 0;
for(int j = 0; j < k; j++)
{
for(int i = 0; i < k/4; i++)
{
c_acc[i] += A[(idy*8 + idx/4) + j*256] * B[idx%4 + 4*i +256*j];
}
}
for(int i = 0; i < k/4; i++)
{
C[(idy*8 + idx/4)*256 + idx%4 + 4*i] = c_acc[i];
}
*/
/*
int kCenterX = FWIDTH / 2;
int kCenterY = FWIDTH / 2;
int a[2];
a[0] = 0;
a[1] = 0;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if(i < CWIDTH && j < CWIDTH)
{
for(int k = 0; k < CNUMBER; k++)
{
for (int m = 0; m < FWIDTH; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
// index of input, used for checking boundary
int ii = i + (m - kCenterY);
int jj = j + (n - kCenterX);
// ignore input samples which are out of bound
if (ii >= 0 && ii < CWIDTH && jj >= 0 && jj < CWIDTH)
a[k] += IC[k * CWIDTH * CWIDTH + ii * CWIDTH + jj] * F[k * FWIDTH * FWIDTH + m * FWIDTH + n];
}
}
OC[k + CWIDTH * CWIDTH + i * CWIDTH + j] = a[k];
}
}
*/
/*
int idx = threadIdx.x;
int idy = threadIdx.y;
int kCenterX = FWIDTH / 2;
int kCenterY = FWIDTH / 2;
int octemp[3][64];
__shared__ int filtemp[CNUMBER * FWIDTH * FWIDTH];
int a,b;
for(int j = 0; j < CNUMBER; j++)
{
for(int i = 0; i < 64; i++)
octemp[j][i] = 0;
}
if(idx < CNUMBER * FWIDTH * FWIDTH && idy == 0)
filtemp[idx] = F[idx];
__syncthreads();
for(int i = 0; i < CWIDTH/4; i++)
{
a = (idy*8 + idx/4);
b = idx%4 + 4*i;
for (int m = 0; m < FWIDTH; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
// index of input, used for checking boundary
int ii = a + (m - kCenterY);
int jj = b + (n - kCenterX);
for(int k = 0; k < CNUMBER; k++)
{
// ignore input samples which are out of bound
if (ii >= 0 && ii < CWIDTH && jj >= 0 && jj < CWIDTH)
octemp[k][i] += IC[k * CWIDTH * CWIDTH + ii * CWIDTH + jj] * filtemp[k * FWIDTH * FWIDTH + m * FWIDTH + n];
}
}
}
}
for(int j = 0; j < CNUMBER; j++)
{
for(int i = 0; i < CWIDTH/4; i++)
{
OC[(j*CWIDTH*CWIDTH) + ((idy*8 + idx/4)*CWIDTH) + idx%4 + 4*i] = octemp[j][i];
}
}
*/
int idx = threadIdx.x;
int idy = threadIdx.y;
//printf("hello\n");
int kCenterX = FWIDTH / 2;
int kCenterY = FHEIGHT / 2;
int filtersperthread = CNUMBER / BLOCK_SIZE;
int remaining = CNUMBER % BLOCK_SIZE;
//printf("fil per t %d",filtersperthread);
float filtertemp[3][FHEIGHT][FWIDTH];
float outputtemp[CHEIGHT / BLOCK_SIZE + 1][CWIDTH];
// Zero the per-thread accumulator before the first pass; it is filled with +=
for(int i = 0; i < CHEIGHT / BLOCK_SIZE + 1; i++)
for(int j = 0; j < CWIDTH; j++)
outputtemp[i][j] = 0;
//printf("hello");
for(int k = 0; k < filtersperthread; k++)
{
for(int i = 0; i < FHEIGHT; i++)
{
for(int j = 0; j < FWIDTH; j++)
{
filtertemp[k][i][j] = F[k * FHEIGHT * FWIDTH + i * FWIDTH + j];
//printf("%.1f %.1f\n",filtertemp[k][i][j],F[k * FHEIGHT * FWIDTH * BLOCK_SIZE + idx%BLOCK_SIZE * FHEIGHT * FWIDTH + i * FWIDTH + j]);
}
}
}
/*
if(idx == 0 && idy == 0)
{
for(int i = 0; i < FHEIGHT; i++)
{
for(int j = 0; j < FWIDTH; j++)
{
printf("%.1f",filtertemp[0][i][j]);
}
printf("\n");
}
}
*/
if(remaining > 0 && idx < remaining)
{
int k = filtersperthread;
for(int i = 0; i < FHEIGHT; i++)
{
for(int j = 0; j < FWIDTH; j++)
{
filtertemp[k][i][j] = F[k * FHEIGHT * FWIDTH + i * FWIDTH + j];
}
}
}
//idx = column number, idy = row number
for(int k = 0; k < filtersperthread; k++)
{
for(int i = (idy * CHEIGHT / BLOCK_SIZE) - kCenterY; i < ((idy + 1) * CHEIGHT / BLOCK_SIZE) + kCenterY; i++)
{
for(int j = 0; j < CWIDTH; j++)
{
if(i >= 0 && i < CHEIGHT)
{
float input_element = IC[(k * CHEIGHT * CWIDTH * BLOCK_SIZE) + ((idx%BLOCK_SIZE) * CHEIGHT * CWIDTH) + (i * CWIDTH) + j];
#pragma unroll
for (int m = 0; m < FHEIGHT; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
int ii = i + (m - kCenterY);
int jj = j + (n - kCenterX);
if (ii >= 0 && ii < CHEIGHT && jj >= 0 && jj < CWIDTH && ii >= (idy * CHEIGHT / BLOCK_SIZE) && ii < ((idy + 1) * CHEIGHT / BLOCK_SIZE))
{
//printf("%.1f\n", filtertemp[k][m][n] * IC[idx * CWIDTH * CWIDTH + i * CWIDTH + j]);
outputtemp[ii - (idy * CHEIGHT / BLOCK_SIZE)][jj] += filtertemp[k][m][n] * input_element;
}
//__syncthreads();
}
}
}
}
}
for(int i = (idy * CHEIGHT / BLOCK_SIZE); i < ((idy + 1) * CHEIGHT / BLOCK_SIZE); i++)
{
for(int j = 0; j < CWIDTH; j++)
{
OC[(k * CHEIGHT * CWIDTH * BLOCK_SIZE) + ((idx%BLOCK_SIZE) * CHEIGHT * CWIDTH) + (i * CWIDTH) + j] = outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j];
outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j] = 0;
}
}
}
if(remaining > 0 && idx < remaining)
{
int k = filtersperthread;
for(int i = (idy * CHEIGHT / BLOCK_SIZE) - kCenterY; i < ((idy + 1) * CHEIGHT / BLOCK_SIZE) + kCenterY; i++)
{
for(int j = 0; j < CWIDTH; j++)
{
if(i >= 0 && i < CHEIGHT)
{
float input_element = IC[k * CHEIGHT * CWIDTH * BLOCK_SIZE + idx%BLOCK_SIZE * CHEIGHT * CWIDTH + i * CWIDTH + j];
#pragma unroll
for (int m = 0; m < FHEIGHT; ++m) // kernel rows
{
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
int ii = i + (m - kCenterY);
int jj = j + (n - kCenterX);
//if(idx == 0 && idy == 0)
//printf(" %d %d %d %d ", m, n, ii, jj);
if (ii >= 0 && ii < CHEIGHT && jj >= 0 && jj < CWIDTH && ii >= (idy * CHEIGHT / BLOCK_SIZE) && ii < ((idy + 1) * CHEIGHT / BLOCK_SIZE))
{
//printf("%f", filtertemp[k][m][n] * IC[idx * CWIDTH * CWIDTH + i * CWIDTH + j]);
outputtemp[ii - (idy * CHEIGHT / BLOCK_SIZE)][jj] += filtertemp[k][m][n] * input_element;
// if(idx == 0 && idy == 0)
//printf(" yo %d %d \n", m, n);
}
//__syncthreads();
}
}
}
}
}
for(int i = (idy * CHEIGHT / BLOCK_SIZE); i < ((idy + 1) * CHEIGHT / BLOCK_SIZE); i++)
{
for(int j = 0; j < CWIDTH; j++)
{
OC[k * CHEIGHT * CWIDTH * BLOCK_SIZE + (idx % BLOCK_SIZE) * CHEIGHT * CWIDTH + i * CWIDTH + j] = outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j];
outputtemp[i - (idy * CHEIGHT / BLOCK_SIZE)][j] = 0;
}
}
}
/*
for(int i = 0; i < 32; i++)
filteroutshared[idy][idx] = 0;
for(int i = 0; i < 32; i++)
filteroutshared[idy][(idx + i) % 32] += outputtemp[0][(idx + i) % 32];
__syncthreads();
if(idx == 0 && idy == 0)
{
for(int i = 0; i < 32; i++)
{
for(int j = 0; j < 32; j++)
printf("%.1f ", filteroutshared[i][j]);
printf("\n");
}
}
*/
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
void cudaconvolute(float* IC, float* F, float* OC, float*** OC_cpu)
{
size_t totalBytes_channel = sizeof(float) * CNUMBER * CHEIGHT * CWIDTH;
size_t totalBytes_filter = sizeof(float) * CNUMBER * FHEIGHT * FWIDTH;
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 blocks(N/BLOCK_SIZE,N/BLOCK_SIZE);
float* device_IC;
float* device_F;
float* device_OC;
cudaMalloc(&device_IC, totalBytes_channel);
cudaMalloc(&device_F, totalBytes_filter);
cudaMalloc(&device_OC, totalBytes_channel);
cudaMemcpy(device_IC, IC, totalBytes_channel, cudaMemcpyHostToDevice);
cudaMemcpy(device_F, F, totalBytes_filter, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
double startKernelTime = CycleTimer::currentSeconds();
cudaEventRecord(start);
convolute_kernel<<<1, threadsPerBlock>>>(device_IC, device_F, device_OC);
cudaEventRecord(stop);
cudaDeviceSynchronize();
double endKernelTime = CycleTimer::currentSeconds();
cudaMemcpy(OC, device_OC, totalBytes_channel, cudaMemcpyDeviceToHost);
double kernelDuration = endKernelTime - startKernelTime;
printf("KernelDuration: %.3f ms\n", 1000.f * kernelDuration);
float m = 0;
cudaEventElapsedTime(&m, start, stop);
printf("CUDA Elapsed Time %f ms\n", m);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Bandwidth = %f GFLOPS/s\n", (2 * FWIDTH * FHEIGHT * CWIDTH * CHEIGHT * CNUMBER) / (m * 1000000000));
// for(int k = 0; k < CNUMBER; k++) {
// for (int i = 0;i < CHEIGHT; i++){
// for (int j = 0; j < CWIDTH; j++) {
bool equal = true;
for(int k = 0; k < CNUMBER; k++) {
for (int i = 0;i < CHEIGHT; i++){
for (int j = 0; j < CWIDTH; j++) {
//printf("%0.1f ",OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j]);
if(OC_cpu[k][i][j] != OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j])
{
equal = false;
//printf("%0.1f ",OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j]);
//printf("%d %d %d %.1f != %.1f\n", k , i, j, OC_cpu[k][i][j], OC[k * CHEIGHT * CWIDTH + i * CWIDTH + j]);
//break;
}
//printf("%d",OC[k * CNUMBER * CWIDTH + i * CWIDTH + j]);
}
//printf("\n");
}
//printf("\n");
}
if(equal)
printf("EQUAL\n");
else
printf("NOT EQUAL\n");
cudaFree(device_IC);
cudaFree(device_F);
cudaFree(device_OC);
}
void
printCudaInfo() {
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
printf(" Shared memory per block: %d bytes\n", deviceProps.sharedMemPerBlock);
}
printf("---------------------------------------------------------\n");
}
//cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//cudaEventRecord(start);
//cudaEventRecord(stop);
//float m = 0;
//cudaEventElapsedTime(&m, start, stop);
//cudaEventDestroy(start);
//cudaEventDestroy(stop);
|
369b08fc8214dc52bdbcb89e58fc24b77a792df2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix addition */
#include <iostream>
using namespace std;
__global__
void sumaMatrizKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x);
if(i<n*n)
C[i] = A[i] +B[i];
}
void sumaMatrix(float* A, float* B, float* C, int tam)
{
int size = (tam*tam) * sizeof(float);
float *d_A,*d_B,*d_C;
hipMalloc((void**)&d_A,size);
hipMalloc((void**)&d_B,size);
hipMalloc((void**)&d_C,size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
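// Launch-configuration note: ceil(tam*tam/256.0) blocks of 256 threads cover all
// tam*tam elements; the i < n*n guard inside the kernel discards surplus threads.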
hipLaunchKernelGGL(( sumaMatrizKernel), dim3(ceil(tam*tam/256.0)),dim3(256), 0, 0, d_A,d_B,d_C,tam);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree(d_A);hipFree(d_B);hipFree(d_C);
}
int main()
{
int n = 10;
float *h_A,*h_B,*h_C;
h_A = new float[n*n];
h_B = new float[n*n];
h_C = new float[n*n];
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_A[i*n+j] = rand() % 100;
}
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_B[i*n+j] = rand() % 100;
}
cout<<"Los vectores generados son: "<<endl;
for(int i = 0; i < n; i++){
cout<<h_A[i]<<" ; ";
}
cout<<endl;
for(int i = 0; i < n; i++){
cout<<h_B[i]<<" ; ";
}
cout<<endl;
sumaMatrix(h_A,h_B,h_C,n);
for(int i = 0; i < n; i++){
cout<<h_C[i]<<" ; ";
}
cout<<endl;
return 0;
}
|
369b08fc8214dc52bdbcb89e58fc24b77a792df2.cu
|
/* Matrix addition */
#include <iostream>
using namespace std;
__global__
void sumaMatrizKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x);
if(i<n*n)
C[i] = A[i] +B[i];
}
void sumaMatrix(float* A, float* B, float* C, int tam)
{
int size = (tam*tam) * sizeof(float);
float *d_A,*d_B,*d_C;
cudaMalloc((void**)&d_A,size);
cudaMalloc((void**)&d_B,size);
cudaMalloc((void**)&d_C,size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
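// Launch-configuration note: ceil(tam*tam/256.0) blocks of 256 threads cover all
// tam*tam elements; the i < n*n guard inside the kernel discards surplus threads.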
sumaMatrizKernel<<<ceil(tam*tam/256.0),256>>>(d_A,d_B,d_C,tam);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}
int main()
{
int n = 10;
float *h_A,*h_B,*h_C;
h_A = new float[n*n];
h_B = new float[n*n];
h_C = new float[n*n];
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_A[i*n+j] = rand() % 100;
}
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_B[i*n+j] = rand() % 100;
}
cout<<"Los vectores generados son: "<<endl;
for(int i = 0; i < n; i++){
cout<<h_A[i]<<" ; ";
}
cout<<endl;
for(int i = 0; i < n; i++){
cout<<h_B[i]<<" ; ";
}
cout<<endl;
sumaMatrix(h_A,h_B,h_C,n);
for(int i = 0; i < n; i++){
cout<<h_C[i]<<" ; ";
}
cout<<endl;
return 0;
}
|
2d06f9cd091da407da8c47efa43474dc3a402c85.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/char_types/char_cases.h>
#include <strings/char_types/char_flags.h>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <cstring>
namespace cudf {
namespace strings {
namespace detail {
// Used to build a temporary string_view object from a single host string.
std::unique_ptr<string_view, std::function<void(string_view*)>> string_from_host(
const char* str, rmm::cuda_stream_view stream)
{
if (!str) return nullptr;
auto length = std::strlen(str);
auto* d_str = new rmm::device_buffer(length, stream);
CUDA_TRY(hipMemcpyAsync(d_str->data(), str, length, hipMemcpyHostToDevice, stream.value()));
stream.synchronize();
auto deleter = [d_str](string_view* sv) { delete d_str; };
return std::unique_ptr<string_view, decltype(deleter)>{
new string_view(reinterpret_cast<char*>(d_str->data()), length), deleter};
}
/**
* @copydoc create_string_vector_from_column
*/
rmm::device_uvector<string_view> create_string_vector_from_column(cudf::strings_column_view strings,
rmm::cuda_stream_view stream)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
rmm::device_uvector<string_view> strings_vector(strings.size(), stream);
string_view* d_strings = strings_vector.data();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
[d_column, d_strings] __device__(size_type idx) {
if (d_column.is_null(idx))
d_strings[idx] = string_view(nullptr, 0);
else
d_strings[idx] = d_column.element<string_view>(idx);
});
return strings_vector;
}
/**
* @copydoc child_offsets_from_string_vector
*/
std::unique_ptr<cudf::column> child_offsets_from_string_vector(
cudf::device_span<string_view> strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return child_offsets_from_string_iterator(strings.begin(), strings.size(), stream, mr);
}
/**
* @copydoc child_chars_from_string_vector
*/
std::unique_ptr<cudf::column> child_chars_from_string_vector(cudf::device_span<string_view> strings,
column_view const& offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const d_strings = strings.data();
auto const bytes = cudf::detail::get_value<int32_t>(offsets, strings.size(), stream);
auto const d_offsets = offsets.data<int32_t>();
// create column
auto chars_column =
make_numeric_column(data_type{type_id::INT8}, bytes, mask_state::UNALLOCATED, stream, mr);
// get its view
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
[d_strings, d_offsets, d_chars] __device__(size_type idx) {
string_view const d_str = d_strings[idx];
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return chars_column;
}
//
std::unique_ptr<column> create_chars_child_column(cudf::size_type strings_count,
cudf::size_type null_count,
cudf::size_type total_bytes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(null_count <= strings_count, "Invalid null count");
return make_numeric_column(
data_type{type_id::INT8}, total_bytes, mask_state::UNALLOCATED, stream, mr);
}
//
std::unique_ptr<column> make_empty_strings_column(rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return std::make_unique<column>(data_type{type_id::STRING},
0,
rmm::device_buffer{0, stream, mr}, // data
rmm::device_buffer{0, stream, mr},
0); // nulls
}
namespace {
// The device variables are created here to avoid using a singleton that may cause issues
// with RMM initialize/finalize. See PR #3159 for details on this approach.
__device__ character_flags_table_type
character_codepoint_flags[sizeof(g_character_codepoint_flags)];
__device__ character_cases_table_type character_cases_table[sizeof(g_character_cases_table)];
__device__ special_case_mapping character_special_case_mappings[sizeof(g_special_case_mappings)];
thread_safe_per_context_cache<character_flags_table_type> d_character_codepoint_flags;
thread_safe_per_context_cache<character_cases_table_type> d_character_cases_table;
thread_safe_per_context_cache<special_case_mapping> d_special_case_mappings;
} // namespace
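// Note: each getter below uses find_or_initialize so that the host-side table is
// copied into its __device__ counterpart (copy-to-symbol) only once per context,
// after which the cached device address is returned for kernels to index directly.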
/**
* @copydoc cudf::strings::detail::get_character_flags_table
*/
const character_flags_table_type* get_character_flags_table()
{
return d_character_codepoint_flags.find_or_initialize([&](void) {
character_flags_table_type* table = nullptr;
CUDA_TRY(hipMemcpyToSymbol(
character_codepoint_flags, g_character_codepoint_flags, sizeof(g_character_codepoint_flags)));
CUDA_TRY(hipGetSymbolAddress((void**)&table, character_codepoint_flags));
return table;
});
}
/**
* @copydoc cudf::strings::detail::get_character_cases_table
*/
const character_cases_table_type* get_character_cases_table()
{
return d_character_cases_table.find_or_initialize([&](void) {
character_cases_table_type* table = nullptr;
CUDA_TRY(hipMemcpyToSymbol(
character_cases_table, g_character_cases_table, sizeof(g_character_cases_table)));
CUDA_TRY(hipGetSymbolAddress((void**)&table, character_cases_table));
return table;
});
}
/**
* @copydoc cudf::strings::detail::get_special_case_mapping_table
*/
const special_case_mapping* get_special_case_mapping_table()
{
return d_special_case_mappings.find_or_initialize([&](void) {
special_case_mapping* table = nullptr;
CUDA_TRY(hipMemcpyToSymbol(
character_special_case_mappings, g_special_case_mappings, sizeof(g_special_case_mappings)));
CUDA_TRY(hipGetSymbolAddress((void**)&table, character_special_case_mappings));
return table;
});
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
2d06f9cd091da407da8c47efa43474dc3a402c85.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/char_types/char_cases.h>
#include <strings/char_types/char_flags.h>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <cstring>
namespace cudf {
namespace strings {
namespace detail {
// Used to build a temporary string_view object from a single host string.
std::unique_ptr<string_view, std::function<void(string_view*)>> string_from_host(
const char* str, rmm::cuda_stream_view stream)
{
if (!str) return nullptr;
auto length = std::strlen(str);
auto* d_str = new rmm::device_buffer(length, stream);
CUDA_TRY(cudaMemcpyAsync(d_str->data(), str, length, cudaMemcpyHostToDevice, stream.value()));
stream.synchronize();
auto deleter = [d_str](string_view* sv) { delete d_str; };
return std::unique_ptr<string_view, decltype(deleter)>{
new string_view(reinterpret_cast<char*>(d_str->data()), length), deleter};
}
/**
* @copydoc create_string_vector_from_column
*/
rmm::device_uvector<string_view> create_string_vector_from_column(cudf::strings_column_view strings,
rmm::cuda_stream_view stream)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
rmm::device_uvector<string_view> strings_vector(strings.size(), stream);
string_view* d_strings = strings_vector.data();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
[d_column, d_strings] __device__(size_type idx) {
if (d_column.is_null(idx))
d_strings[idx] = string_view(nullptr, 0);
else
d_strings[idx] = d_column.element<string_view>(idx);
});
return strings_vector;
}
/**
* @copydoc child_offsets_from_string_vector
*/
std::unique_ptr<cudf::column> child_offsets_from_string_vector(
cudf::device_span<string_view> strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return child_offsets_from_string_iterator(strings.begin(), strings.size(), stream, mr);
}
/**
* @copydoc child_chars_from_string_vector
*/
std::unique_ptr<cudf::column> child_chars_from_string_vector(cudf::device_span<string_view> strings,
column_view const& offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const d_strings = strings.data();
auto const bytes = cudf::detail::get_value<int32_t>(offsets, strings.size(), stream);
auto const d_offsets = offsets.data<int32_t>();
// create column
auto chars_column =
make_numeric_column(data_type{type_id::INT8}, bytes, mask_state::UNALLOCATED, stream, mr);
// get its view
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
[d_strings, d_offsets, d_chars] __device__(size_type idx) {
string_view const d_str = d_strings[idx];
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return chars_column;
}
//
std::unique_ptr<column> create_chars_child_column(cudf::size_type strings_count,
cudf::size_type null_count,
cudf::size_type total_bytes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(null_count <= strings_count, "Invalid null count");
return make_numeric_column(
data_type{type_id::INT8}, total_bytes, mask_state::UNALLOCATED, stream, mr);
}
//
std::unique_ptr<column> make_empty_strings_column(rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return std::make_unique<column>(data_type{type_id::STRING},
0,
rmm::device_buffer{0, stream, mr}, // data
rmm::device_buffer{0, stream, mr},
0); // nulls
}
namespace {
// The device variables are created here to avoid using a singleton that may cause issues
// with RMM initialize/finalize. See PR #3159 for details on this approach.
__device__ character_flags_table_type
character_codepoint_flags[sizeof(g_character_codepoint_flags)];
__device__ character_cases_table_type character_cases_table[sizeof(g_character_cases_table)];
__device__ special_case_mapping character_special_case_mappings[sizeof(g_special_case_mappings)];
thread_safe_per_context_cache<character_flags_table_type> d_character_codepoint_flags;
thread_safe_per_context_cache<character_cases_table_type> d_character_cases_table;
thread_safe_per_context_cache<special_case_mapping> d_special_case_mappings;
} // namespace
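// Note: each getter below uses find_or_initialize so that the host-side table is
// copied into its __device__ counterpart (copy-to-symbol) only once per context,
// after which the cached device address is returned for kernels to index directly.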
/**
* @copydoc cudf::strings::detail::get_character_flags_table
*/
const character_flags_table_type* get_character_flags_table()
{
return d_character_codepoint_flags.find_or_initialize([&](void) {
character_flags_table_type* table = nullptr;
CUDA_TRY(cudaMemcpyToSymbol(
character_codepoint_flags, g_character_codepoint_flags, sizeof(g_character_codepoint_flags)));
CUDA_TRY(cudaGetSymbolAddress((void**)&table, character_codepoint_flags));
return table;
});
}
/**
* @copydoc cudf::strings::detail::get_character_cases_table
*/
const character_cases_table_type* get_character_cases_table()
{
return d_character_cases_table.find_or_initialize([&](void) {
character_cases_table_type* table = nullptr;
CUDA_TRY(cudaMemcpyToSymbol(
character_cases_table, g_character_cases_table, sizeof(g_character_cases_table)));
CUDA_TRY(cudaGetSymbolAddress((void**)&table, character_cases_table));
return table;
});
}
/**
* @copydoc cudf::strings::detail::get_special_case_mapping_table
*/
const special_case_mapping* get_special_case_mapping_table()
{
return d_special_case_mappings.find_or_initialize([&](void) {
special_case_mapping* table = nullptr;
CUDA_TRY(cudaMemcpyToSymbol(
character_special_case_mappings, g_special_case_mappings, sizeof(g_special_case_mappings)));
CUDA_TRY(cudaGetSymbolAddress((void**)&table, character_special_case_mappings));
return table;
});
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
3062d5372c6c01f44f17b9618a288a81fd678770.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zthrsrm.cu, normal z -> s, Mon Jun 25 18:24:26 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime.h>
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
#define BLOCK_SIZE 128
__global__ void
zcompute_newval_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
magma_index_t* Browidx,
magma_index_t* Bcol,
float* Aval,
float* Bval)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
magma_index_t offset_new, offset_old, end_old;
if (tidx < num_rows) {
magma_int_t count = 0;
offset_old = Arow[tidx];
offset_new = Brow[tidx];
end_old = Arow[tidx+1];
for (int i = offset_old; i < end_old; i++) {
if(Acol[i]>-1){
Bcol[offset_new+count] = Acol[i];
Bval[offset_new+count] = Aval[i];
Browidx[offset_new + count] = tidx;
count++;
}
}
}
}
//kernel
__global__ void
zcompute_nnz_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
float* Aval,
float thrs)
{
int row= blockIdx.x*blockDim.x+threadIdx.x;
if (row < num_rows) {
magma_int_t rm = 0;
magma_int_t el = 0;
for (int i = Arow[row]; i<Arow[row+1]; i++) {
if (MAGMA_S_ABS(Aval[i]) <= thrs ) {
if (Acol[i] != row) {
Acol[i] = -1;//cheaperthanval
rm++;
} else {
el++;
}
} else {
el++;
}
}
Brow[row] = el;
}
}
/**
Purpose
-------
This routine removes from the matrix all off-diagonal elements whose
magnitude is smaller than or equal to the given threshold; diagonal
elements are always kept.
Arguments
---------
@param[in,out]
A magma_s_matrix*
matrix from which the small elements are removed
@param[in]
thrs float*
threshold used for the removal
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_sthrsholdrm_gpu(
magma_int_t order,
magma_s_matrix* A,
float* thrs,
magma_queue_t queue)
{
magma_int_t info = 0;
magma_int_t num_blocks = magma_ceildiv(A->num_rows,BLOCK_SIZE);
magma_s_matrix B={Magma_CSR};
B.num_rows = A->num_rows;
B.num_cols = A->num_cols;
B.storage_type = A->storage_type;
B.memory_location = Magma_DEV;
magma_index_t *new_rownnz={NULL};
dim3 block(BLOCK_SIZE, 1, 1);
dim3 grid(num_blocks, 1, 1 );
magma_index_malloc(&new_rownnz,A->num_rows);
magma_index_malloc(&B.drow,A->num_rows+1);
hipLaunchKernelGGL(( zcompute_nnz_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream(),
A->num_rows, A->drow, new_rownnz, A->dcol, A->dval,*thrs);
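// new_rownnz now holds the number of surviving elements per row;
// magma_sget_row_ptr turns these counts into the row-pointer array B.drow
// and the total element count B.nnz used for the allocations below.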
magma_sget_row_ptr(A->num_rows, &B.nnz, new_rownnz, B.drow, queue);
magma_smalloc(&B.dval,B.nnz);
magma_index_malloc(&B.rowidx,B.nnz);
magma_index_malloc(&B.dcol,B.nnz);
hipLaunchKernelGGL(( zcompute_newval_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream(),
A->num_rows, A->drow, B.drow, A->dcol,B.drowidx, B.dcol, A->dval, B.dval);
//Rewrite the matrix with all the new values
magma_smatrix_swap(&B, A, queue);
magma_smfree(&B, queue);
magma_free(new_rownnz);
return info;
}
|
3062d5372c6c01f44f17b9618a288a81fd678770.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zthrsrm.cu, normal z -> s, Mon Jun 25 18:24:26 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#include <cuda_runtime.h>
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
#define BLOCK_SIZE 128
__global__ void
zcompute_newval_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
magma_index_t* Browidx,
magma_index_t* Bcol,
float* Aval,
float* Bval)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
magma_index_t offset_new, offset_old, end_old;
if (tidx < num_rows) {
magma_int_t count = 0;
offset_old = Arow[tidx];
offset_new = Brow[tidx];
end_old = Arow[tidx+1];
for (int i = offset_old; i < end_old; i++) {
if(Acol[i]>-1){
Bcol[offset_new+count] = Acol[i];
Bval[offset_new+count] = Aval[i];
Browidx[offset_new + count] = tidx;
count++;
}
}
}
}
//kernel
__global__ void
zcompute_nnz_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
float* Aval,
float thrs)
{
int row= blockIdx.x*blockDim.x+threadIdx.x;
if (row < num_rows) {
magma_int_t rm = 0;
magma_int_t el = 0;
for (int i = Arow[row]; i<Arow[row+1]; i++) {
if (MAGMA_S_ABS(Aval[i]) <= thrs ) {
if (Acol[i] != row) {
Acol[i] = -1;//cheaperthanval
rm++;
} else {
el++;
}
} else {
el++;
}
}
Brow[row] = el;
}
}
/**
Purpose
-------
This routine removes from the matrix all off-diagonal elements whose
magnitude is smaller than or equal to the given threshold; diagonal
elements are always kept.
Arguments
---------
@param[in,out]
A magma_s_matrix*
matrix from which the small elements are removed
@param[in]
thrs float*
threshold used for the removal
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_sthrsholdrm_gpu(
magma_int_t order,
magma_s_matrix* A,
float* thrs,
magma_queue_t queue)
{
magma_int_t info = 0;
magma_int_t num_blocks = magma_ceildiv(A->num_rows,BLOCK_SIZE);
magma_s_matrix B={Magma_CSR};
B.num_rows = A->num_rows;
B.num_cols = A->num_cols;
B.storage_type = A->storage_type;
B.memory_location = Magma_DEV;
magma_index_t *new_rownnz={NULL};
dim3 block(BLOCK_SIZE, 1, 1);
dim3 grid(num_blocks, 1, 1 );
magma_index_malloc(&new_rownnz,A->num_rows);
magma_index_malloc(&B.drow,A->num_rows+1);
zcompute_nnz_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(A->num_rows, A->drow, new_rownnz, A->dcol, A->dval,*thrs);
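// new_rownnz now holds the number of surviving elements per row;
// magma_sget_row_ptr turns these counts into the row-pointer array B.drow
// and the total element count B.nnz used for the allocations below.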
magma_sget_row_ptr(A->num_rows, &B.nnz, new_rownnz, B.drow, queue);
magma_smalloc(&B.dval,B.nnz);
magma_index_malloc(&B.rowidx,B.nnz);
magma_index_malloc(&B.dcol,B.nnz);
zcompute_newval_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(A->num_rows, A->drow, B.drow, A->dcol,B.drowidx, B.dcol, A->dval, B.dval);
//Rewrite the matrix with all the new values
magma_smatrix_swap(&B, A, queue);
magma_smfree(&B, queue);
magma_free(new_rownnz);
return info;
}
|
16c0ebff3768298760b13d3935363d7f61231050.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../local_exchange.cuh"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
int main(int argc, char* args[]) {
int batch_size = atoi(args[1]);
int n_expert = atoi(args[2]);
long* gate_idx = new long[batch_size];
long* n_gate_idx = new long[batch_size];
int* ref_lec = new int[n_expert];
memset(ref_lec, 0, sizeof(int) * n_expert);
for (int i = 0; i < batch_size; ++i) {
gate_idx[i] = rand() % (n_expert + 1) - 1;
if (gate_idx[i] != -1) {
ref_lec[gate_idx[i]] += 1;
}
}
puts("ref lec");
for (int i = 0; i < n_expert; ++i) {
printf("%d ", ref_lec[i]);
}
putchar(10);
int* g_lec;
hipMalloc(&g_lec, sizeof(int) * n_expert);
hipMemset(g_lec, 0, sizeof(int) * n_expert);
long* g_gate_idx;
hipMalloc(&g_gate_idx, sizeof(long) * batch_size);
hipMemcpy(g_gate_idx, gate_idx, sizeof(long) * batch_size,
hipMemcpyHostToDevice);
auto smgr = getCudaStreamManager(0);
fmoe_cuda_expert_count_impl(g_gate_idx, g_lec, batch_size, n_expert, smgr);
int* lec = new int[n_expert];
hipMemcpy(lec, g_lec, sizeof(int) * n_expert, hipMemcpyDeviceToHost);
puts("lec");
for (int i = 0; i < n_expert; ++i) {
printf("%d ", lec[i]);
}
putchar(10);
}
|
16c0ebff3768298760b13d3935363d7f61231050.cu
|
#include "../local_exchange.cuh"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cuda.h>
#include <cuda_runtime.h>
int main(int argc, char* args[]) {
int batch_size = atoi(args[1]);
int n_expert = atoi(args[2]);
long* gate_idx = new long[batch_size];
long* n_gate_idx = new long[batch_size];
int* ref_lec = new int[n_expert];
memset(ref_lec, 0, sizeof(int) * n_expert);
for (int i = 0; i < batch_size; ++i) {
gate_idx[i] = rand() % (n_expert + 1) - 1;
if (gate_idx[i] != -1) {
ref_lec[gate_idx[i]] += 1;
}
}
puts("ref lec");
for (int i = 0; i < n_expert; ++i) {
printf("%d ", ref_lec[i]);
}
putchar(10);
int* g_lec;
cudaMalloc(&g_lec, sizeof(int) * n_expert);
cudaMemset(g_lec, 0, sizeof(int) * n_expert);
long* g_gate_idx;
cudaMalloc(&g_gate_idx, sizeof(long) * batch_size);
cudaMemcpy(g_gate_idx, gate_idx, sizeof(long) * batch_size,
cudaMemcpyHostToDevice);
auto smgr = getCudaStreamManager(0);
fmoe_cuda_expert_count_impl(g_gate_idx, g_lec, batch_size, n_expert, smgr);
int* lec = new int[n_expert];
cudaMemcpy(lec, g_lec, sizeof(int) * n_expert, cudaMemcpyDeviceToHost);
puts("lec");
for (int i = 0; i < n_expert; ++i) {
printf("%d ", lec[i]);
}
putchar(10);
}
|
26b1b7adba04c95c6f50a812db2152a43b47fb2c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//*LB*
// Copyright (c) 2010, University of Bonn, Institute for Computer Science VI
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Bonn
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
#include <cuv/basics/tensor.hpp>
#include <cuv/matrix_ops/matrix_ops.hpp>
#include <cuv/libs/separable_conv/separable_convolution.hpp>
#include <cuv/libs/nlmeans/conv3d.hpp>
#include "hog.hpp"
template<class V, class I>
__global__
void select_arg_kernel(V* dst, const V* src, const I* arg, unsigned int w){
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id>=w) return;
dst[id] = src[arg[id]*w+id];
}
template<class V>
__global__
void atan2_abs_and_norm_kernel(V* angle, V* norm, const V* gy, const V* gx, unsigned int w){
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id>=w) return;
V gyval=gy[id];
V gxval=gx[id];
V tmp = atan2(gyval,gxval);
if(tmp<0) tmp += (float) M_PI;
norm[id] = sqrt(gyval*gyval + gxval*gxval);
angle[id] = tmp;
}
template<class V>
__global__
void orientation_binning_kernel(V*dst, const V* norms, const V* angles, const unsigned int steps, const unsigned int w, const unsigned int h){
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
V lowerbinval = 0;
V upperbinval = 0;
V ang = angles[id];
V mag = norms [id];
const V angdiff = (float)M_PI / (float)steps;
for(unsigned int s = 0; s<steps; s++){
float a1 = (float)M_PI / steps * s;
float a2 = (float)M_PI / steps * (s+1);
if(ang>=a1 && ang<a2){
lowerbinval += mag * (a2-ang)/angdiff;
upperbinval = mag * (ang-a1)/angdiff;
}else{
upperbinval = 0;
}
// for linear interpolation, we write exactly twice in each bin.
// after the second time we write the lower value.
// this works for all except the first iterations of s (which
// is therefore written twice to global memory, see after loop)
dst[s*w*h + id] = lowerbinval;
lowerbinval = upperbinval;
}
// this slightly suboptimal operation saves lots of juggling with shared memory.
dst[0*w*h + id] += lowerbinval;
}
namespace cuv{ namespace libs{ namespace hog{
namespace detail{
inline unsigned int __host__ __device__ divup(unsigned int a, unsigned int b)
{
if (a % b) /* does a divide b leaving a remainder? */
return a / b + 1; /* add in additional block */
else
return a / b; /* divides cleanly */
}
/** after determining the argmax using reduce, we need to extract the
* corresponding values from two matrices. This kernel does the job.
*
* @param dst h x w matrix where result is written to
* @param src c x h x w matrix where we read from
* @param arg h*w matrix of indices, elements should be in [0,c[
*
*/
template<class V, class I>
void select_arg(cuv::tensor<V,dev_memory_space>& dst, const cuv::tensor<V,dev_memory_space>&src, const cuv::tensor<I,dev_memory_space>& arg){
cuvAssert(dst.ndim()==2);
cuvAssert(src.ndim()==3);
cuvAssert(arg.ndim()==1);
const unsigned int h = dst.shape()[0];
const unsigned int w = dst.shape()[1];
cuvAssert(src.shape()[1]==h);
cuvAssert(src.shape()[2]==w);
cuvAssert(arg.shape()[0]==h*w);
/*cuvAssert(minimum(arg)>=0);*/
/*cuvAssert(maximum(arg)< src.shape()[0]);*/
dim3 blocks(divup(arg.shape()[0], 256));
dim3 threads(256);
hipLaunchKernelGGL(( select_arg_kernel), dim3(blocks),dim3(threads), 0, 0, dst.ptr(),src.ptr(),arg.ptr(),arg.shape()[0]);
cuvSafeCall(hipDeviceSynchronize());
}
/** determine angle of gradient disregarding polarisation
*/
template<class V>
void atan2_abs_and_norm(cuv::tensor<V,dev_memory_space>& angle, cuv::tensor<V,dev_memory_space>& norm, const cuv::tensor<V,dev_memory_space>& gy, const cuv::tensor<V,dev_memory_space>& gx){
cuvAssert(equal_shape(gx,gy));
cuvAssert(equal_shape(gx,angle));
cuvAssert(equal_shape(gx,norm));
dim3 blocks(divup(norm.size(),256));
dim3 threads(256);
hipLaunchKernelGGL(( atan2_abs_and_norm_kernel), dim3(blocks),dim3(threads), 0, 0, angle.ptr(),norm.ptr(),gy.ptr(),gx.ptr(),norm.size());
cuvSafeCall(hipDeviceSynchronize());
}
/** bin orientations with bilinear interpolation
* @param dst steps x h x w matrix of resulting gradient maps
* @param norms h x w matrix of gradient magnitudes
* @param angles h x w matrix of angles to be binned
*/
template<class V>
void orientation_binning(cuv::tensor<V,dev_memory_space>& dst, const cuv::tensor<V,dev_memory_space>& norms, const cuv::tensor<V,dev_memory_space>& angles){
cuvAssert(dst.ndim()==3);
cuvAssert(norms.ndim()==2);
cuvAssert(equal_shape(norms,angles));
cuvAssert(dst.shape()[1]==norms.shape()[0]);
cuvAssert(dst.shape()[2]==norms.shape()[1]);
const unsigned int steps = dst.shape()[0];
const unsigned int h = dst.shape()[1];
const unsigned int w = dst.shape()[2];
dim3 blocks(divup(dst.shape()[1]*dst.shape()[2],256));
dim3 threads(256);
hipLaunchKernelGGL(( orientation_binning_kernel), dim3(blocks),dim3(threads), 0, 0, dst.ptr(),norms.ptr(),angles.ptr(),steps,w,h);
cuvSafeCall(hipDeviceSynchronize());
}
template<class V>
void hog(cuv::tensor<V, dev_memory_space>& bins, const cuv::tensor<V,dev_memory_space>& src, unsigned int spatialpool){
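        // HOG pipeline: centered gradients -> per-pixel dominant colour channel ->
        // orientation binning with bilinear interpolation -> Gaussian spatial
        // pooling -> normalization with clipping and renormalization.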
typedef cuv::tensor<V,dev_memory_space> tens_t;
unsigned int chann = src.shape()[0];
unsigned int height = src.shape()[1];
unsigned int width = src.shape()[2];
unsigned int steps = bins.shape()[0];
tens_t magnitude(extents[height][width]);
tens_t angle (extents[height][width]);
{ tens_t gradx(src.shape()),
grady(src.shape()),
allmagnitudes(src.shape()),
allangles(src.shape());
// determine the centered derivatives in x and y direction
cuv::tensor<float,host_memory_space> diff(3);
diff[0] = -0.5f;
diff[1] = 0.f;
diff[2] = 0.5f;
/*cuv::libs::nlmeans::setConvolutionKernel(diff);*/
/*cuv::libs::nlmeans::setConvolutionKernel(diff);*/
cuv::libs::nlmeans::convolutionRows(grady,src,diff);
cuv::libs::nlmeans::convolutionColumns(gradx,src,diff);
// calculate the gradient norms and directions
atan2_abs_and_norm(allangles,allmagnitudes, gradx,grady);
// determine channel with maximal magnitude
allmagnitudes.reshape(chann,height*width);
cuv::tensor<unsigned int,dev_memory_space> argmax(extents[height*width]);
cuv::reduce_to_row(argmax,allmagnitudes,RF_ARGMAX);
allmagnitudes.reshape(extents[chann][height][width]);
// in magnitudes/angles, put maximal values
select_arg(magnitude,allmagnitudes,argmax);
select_arg(angle ,allangles ,argmax);
}
// discretize using bilinear interpolation
orientation_binning(bins,magnitude,angle);
// spatial pooling
{
cuv::tensor<float,host_memory_space> kernel(2*spatialpool+1);
float sigma = spatialpool/2.f;
float sum = 0.f;
for(int i = 0; i < 2*spatialpool+1; i++){
float dist = (float)(i - (int)spatialpool);
float val = expf(- dist * dist / (2*sigma*sigma));
kernel[i] = val;
sum += val;
}
kernel /= sum;
cuv::libs::nlmeans::setConvolutionKernel(kernel);
cuv::libs::nlmeans::setConvolutionKernel(kernel);
tens_t intermed(bins.shape());
cuv::libs::nlmeans::convolutionRows(intermed,bins,kernel);
cuv::libs::nlmeans::convolutionColumns(bins,intermed,kernel);
}
// normalization
bins.reshape(extents[steps][width*height]);
tens_t norms(width*height);
reduce_to_row(norms,bins,RF_ADD_SQUARED);
norms += 0.0001f;
apply_scalar_functor(norms,SF_SQRT);
matrix_divide_row(bins,norms);
// clip
apply_scalar_functor(bins,SF_MIN, 0.2f);
// renormalize
reduce_to_row(norms,bins,RF_ADD_SQUARED);
norms += 0.0001f;
apply_scalar_functor(norms,SF_SQRT);
matrix_divide_row(bins,norms);
bins.reshape(extents[steps][width][height]);
}
template<class V>
void hog(cuv::tensor<V, host_memory_space>& dst, const cuv::tensor<V,host_memory_space>& src, unsigned int spatialpool){
throw std::runtime_error("not implemented");
}
}
template<class V, class M>
void hog(cuv::tensor<V, M>& dst, const cuv::tensor<V,M>& src, unsigned int spatialpool){
cuvAssert(src.ndim()==3);
cuvAssert(dst.ndim()==3);
cuvAssert(src.shape()[0]==3);
cuvAssert(src.shape()[1]==dst.shape()[1]);
cuvAssert(src.shape()[2]==dst.shape()[2]);
detail::hog(dst,src, spatialpool);
}
#define TENS(V,M) \
cuv::tensor<V,M>
#define INSTANTIATE(V,M) \
template void hog(TENS(V, M)&, const TENS(V, M)&, unsigned int);
INSTANTIATE(float,dev_memory_space)
INSTANTIATE(float,host_memory_space)
} } }
|
26b1b7adba04c95c6f50a812db2152a43b47fb2c.cu
|
//*LB*
// Copyright (c) 2010, University of Bonn, Institute for Computer Science VI
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Bonn
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
#include <cuv/basics/tensor.hpp>
#include <cuv/matrix_ops/matrix_ops.hpp>
#include <cuv/libs/separable_conv/separable_convolution.hpp>
#include <cuv/libs/nlmeans/conv3d.hpp>
#include "hog.hpp"
template<class V, class I>
__global__
void select_arg_kernel(V* dst, const V* src, const I* arg, unsigned int w){
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id>=w) return;
dst[id] = src[arg[id]*w+id];
}
template<class V>
__global__
void atan2_abs_and_norm_kernel(V* angle, V* norm, const V* gy, const V* gx, unsigned int w){
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id>=w) return;
V gyval=gy[id];
V gxval=gx[id];
V tmp = atan2(gyval,gxval);
if(tmp<0) tmp += (float) M_PI;
norm[id] = sqrt(gyval*gyval + gxval*gxval);
angle[id] = tmp;
}
template<class V>
__global__
void orientation_binning_kernel(V*dst, const V* norms, const V* angles, const unsigned int steps, const unsigned int w, const unsigned int h){
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
V lowerbinval = 0;
V upperbinval = 0;
V ang = angles[id];
V mag = norms [id];
const V angdiff = (float)M_PI / (float)steps;
for(unsigned int s = 0; s<steps; s++){
float a1 = (float)M_PI / steps * s;
float a2 = (float)M_PI / steps * (s+1);
if(ang>=a1 && ang<a2){
lowerbinval += mag * (a2-ang)/angdiff;
upperbinval = mag * (ang-a1)/angdiff;
}else{
upperbinval = 0;
}
// for linear interpolation, we write exactly twice in each bin.
// after the second time we write the lower value.
// this works for all except the first iterations of s (which
// is therefore written twice to global memory, see after loop)
dst[s*w*h + id] = lowerbinval;
lowerbinval = upperbinval;
}
// this slightly suboptimal operation saves lots of juggling with shared memory.
dst[0*w*h + id] += lowerbinval;
}
namespace cuv{ namespace libs{ namespace hog{
namespace detail{
inline unsigned int __host__ __device__ divup(unsigned int a, unsigned int b)
{
if (a % b) /* does a divide b leaving a remainder? */
return a / b + 1; /* add in additional block */
else
return a / b; /* divides cleanly */
}
/** after determining the argmax using reduce, we need to extract the
* corresponding values from two matrices. This kernel does the job.
*
* @param dst h x w matrix where result is written to
* @param src c x h x w matrix where we read from
* @param arg h*w matrix of indices, elements should be in [0,c[
*
*/
template<class V, class I>
void select_arg(cuv::tensor<V,dev_memory_space>& dst, const cuv::tensor<V,dev_memory_space>&src, const cuv::tensor<I,dev_memory_space>& arg){
cuvAssert(dst.ndim()==2);
cuvAssert(src.ndim()==3);
cuvAssert(arg.ndim()==1);
const unsigned int h = dst.shape()[0];
const unsigned int w = dst.shape()[1];
cuvAssert(src.shape()[1]==h);
cuvAssert(src.shape()[2]==w);
cuvAssert(arg.shape()[0]==h*w);
/*cuvAssert(minimum(arg)>=0);*/
/*cuvAssert(maximum(arg)< src.shape()[0]);*/
dim3 blocks(divup(arg.shape()[0], 256));
dim3 threads(256);
select_arg_kernel<<<blocks,threads>>>(dst.ptr(),src.ptr(),arg.ptr(),arg.shape()[0]);
cuvSafeCall(cudaThreadSynchronize());
}
/** determine angle of gradient disregarding polarisation
*/
template<class V>
void atan2_abs_and_norm(cuv::tensor<V,dev_memory_space>& angle, cuv::tensor<V,dev_memory_space>& norm, const cuv::tensor<V,dev_memory_space>& gy, const cuv::tensor<V,dev_memory_space>& gx){
cuvAssert(equal_shape(gx,gy));
cuvAssert(equal_shape(gx,angle));
cuvAssert(equal_shape(gx,norm));
dim3 blocks(divup(norm.size(),256));
dim3 threads(256);
atan2_abs_and_norm_kernel<<<blocks,threads>>>(angle.ptr(),norm.ptr(),gy.ptr(),gx.ptr(),norm.size());
cuvSafeCall(cudaThreadSynchronize());
}
/** bin orientations with bilinear interpolation
* @param dst steps x h x w matrix of resulting gradient maps
* @param norms h x w matrix of gradient magnitudes
* @param angles h x w matrix of angles to be binned
*/
template<class V>
void orientation_binning(cuv::tensor<V,dev_memory_space>& dst, const cuv::tensor<V,dev_memory_space>& norms, const cuv::tensor<V,dev_memory_space>& angles){
cuvAssert(dst.ndim()==3);
cuvAssert(norms.ndim()==2);
cuvAssert(equal_shape(norms,angles));
cuvAssert(dst.shape()[1]==norms.shape()[0]);
cuvAssert(dst.shape()[2]==norms.shape()[1]);
const unsigned int steps = dst.shape()[0];
const unsigned int h = dst.shape()[1];
const unsigned int w = dst.shape()[2];
dim3 blocks(divup(dst.shape()[1]*dst.shape()[2],256));
dim3 threads(256);
orientation_binning_kernel<<<blocks,threads>>>(dst.ptr(),norms.ptr(),angles.ptr(),steps,w,h);
cuvSafeCall(cudaThreadSynchronize());
}
template<class V>
void hog(cuv::tensor<V, dev_memory_space>& bins, const cuv::tensor<V,dev_memory_space>& src, unsigned int spatialpool){
typedef cuv::tensor<V,dev_memory_space> tens_t;
unsigned int chann = src.shape()[0];
unsigned int height = src.shape()[1];
unsigned int width = src.shape()[2];
unsigned int steps = bins.shape()[0];
tens_t magnitude(extents[height][width]);
tens_t angle (extents[height][width]);
{ tens_t gradx(src.shape()),
grady(src.shape()),
allmagnitudes(src.shape()),
allangles(src.shape());
// determine the centered derivatives in x and y direction
cuv::tensor<float,host_memory_space> diff(3);
diff[0] = -0.5f;
diff[1] = 0.f;
diff[2] = 0.5f;
/*cuv::libs::nlmeans::setConvolutionKernel(diff);*/
/*cuv::libs::nlmeans::setConvolutionKernel(diff);*/
cuv::libs::nlmeans::convolutionRows(grady,src,diff);
cuv::libs::nlmeans::convolutionColumns(gradx,src,diff);
// calculate the gradient norms and directions
atan2_abs_and_norm(allangles,allmagnitudes, gradx,grady);
// determine channel with maximal magnitude
allmagnitudes.reshape(chann,height*width);
cuv::tensor<unsigned int,dev_memory_space> argmax(extents[height*width]);
cuv::reduce_to_row(argmax,allmagnitudes,RF_ARGMAX);
allmagnitudes.reshape(extents[chann][height][width]);
// in magnitudes/angles, put maximal values
select_arg(magnitude,allmagnitudes,argmax);
select_arg(angle ,allangles ,argmax);
}
// discretize using bilinear interpolation
orientation_binning(bins,magnitude,angle);
// spatial pooling
{
cuv::tensor<float,host_memory_space> kernel(2*spatialpool+1);
float sigma = spatialpool/2.f;
float sum = 0.f;
for(int i = 0; i < 2*spatialpool+1; i++){
float dist = (float)(i - (int)spatialpool);
float val = expf(- dist * dist / (2*sigma*sigma));
kernel[i] = val;
sum += val;
}
kernel /= sum;
cuv::libs::nlmeans::setConvolutionKernel(kernel);
cuv::libs::nlmeans::setConvolutionKernel(kernel);
tens_t intermed(bins.shape());
cuv::libs::nlmeans::convolutionRows(intermed,bins,kernel);
cuv::libs::nlmeans::convolutionColumns(bins,intermed,kernel);
}
// normalization
bins.reshape(extents[steps][width*height]);
tens_t norms(width*height);
reduce_to_row(norms,bins,RF_ADD_SQUARED);
norms += 0.0001f;
apply_scalar_functor(norms,SF_SQRT);
matrix_divide_row(bins,norms);
// clip
apply_scalar_functor(bins,SF_MIN, 0.2f);
// renormalize
reduce_to_row(norms,bins,RF_ADD_SQUARED);
norms += 0.0001f;
apply_scalar_functor(norms,SF_SQRT);
matrix_divide_row(bins,norms);
bins.reshape(extents[steps][width][height]);
}
template<class V>
void hog(cuv::tensor<V, host_memory_space>& dst, const cuv::tensor<V,host_memory_space>& src, unsigned int spatialpool){
throw std::runtime_error("not implemented");
}
}
template<class V, class M>
void hog(cuv::tensor<V, M>& dst, const cuv::tensor<V,M>& src, unsigned int spatialpool){
cuvAssert(src.ndim()==3);
cuvAssert(dst.ndim()==3);
cuvAssert(src.shape()[0]==3);
cuvAssert(src.shape()[1]==dst.shape()[1]);
cuvAssert(src.shape()[2]==dst.shape()[2]);
detail::hog(dst,src, spatialpool);
}
#define TENS(V,M) \
cuv::tensor<V,M>
#define INSTANTIATE(V,M) \
template void hog(TENS(V, M)&, const TENS(V, M)&, unsigned int);
INSTANTIATE(float,dev_memory_space)
INSTANTIATE(float,host_memory_space)
} } }
|
0b3af3c10309f2aa01ff573703f69918827fa19b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <cutil.h>
//#include <cutil_math.h>
//#include <cutil_inline_runtime.h>
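// Fill an RGBA byte buffer with a constant test color, one thread per pixel.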
__global__ void createPixels(unsigned char* positions,float time,unsigned int width,unsigned height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
positions[((y*width+x)*4)*sizeof(unsigned char)]= 125;//(int)(time*60)%255;
positions[((y*width+x)*4+1)*sizeof(unsigned char)]= 255;//255*x/width;
positions[((y*width+x)*4+2)*sizeof(unsigned char)]= 125;//255-255*x/width;
positions[((y*width+x)*4+3)*sizeof(unsigned char)]= 105;//0;
}
extern "C" void createImg(unsigned char* ptr,float time,unsigned int width,unsigned height){
dim3 dimBlock(16,16,1);
dim3 dimGrid(width/dimBlock.x,height/dimBlock.y,1);
hipLaunchKernelGGL(( createPixels), dim3(dimGrid),dim3(dimBlock), 0, 0, ptr,time,width,height);
//cutilSafeCall( hipDeviceSynchronize() );
}
|
0b3af3c10309f2aa01ff573703f69918827fa19b.cu
|
//#include <cutil.h>
//#include <cutil_math.h>
//#include <cutil_inline_runtime.h>
__global__ void createPixels(unsigned char* positions,float time,unsigned int width,unsigned height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
positions[((y*width+x)*4)*sizeof(unsigned char)]= 125;//(int)(time*60)%255;
positions[((y*width+x)*4+1)*sizeof(unsigned char)]= 255;//255*x/width;
positions[((y*width+x)*4+2)*sizeof(unsigned char)]= 125;//255-255*x/width;
positions[((y*width+x)*4+3)*sizeof(unsigned char)]= 105;//0;
}
extern "C" void createImg(unsigned char* ptr,float time,unsigned int width,unsigned height){
dim3 dimBlock(16,16,1);
dim3 dimGrid(width/dimBlock.x,height/dimBlock.y,1);
createPixels<<<dimGrid,dimBlock>>>(ptr,time,width,height);
//cutilSafeCall( cudaThreadSynchronize() );
}
|
83778bd825f18afc56fa099c201b6a3f3a4ff1d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
namespace quda {
hipStream_t* getBlasStream();
namespace copy {
#include <texture.h>
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
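    // Grid-stride copy kernel: each thread loads N FloatN elements through the
    // Input accessor and stores them through the Output accessor; the accessors
    // handle any precision conversion (e.g. half <-> single <-> double).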
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
FloatN x[N];
X.load(x, i);
Y.save(x, i);
i += gridSize;
}
}
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {
private:
Input &X;
Output &Y;
const int length;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
virtual bool advanceSharedBytes(TuneParam ¶m) const
{
TuneParam next(param);
advanceBlockDim(next); // to get next blockDim
int nthreads = next.block.x * next.block.y * next.block.z;
param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
return false;
}
public:
CopyCuda(Output &Y, Input &X, int length) : X(X), Y(Y), length(length) { }
virtual ~CopyCuda() { ; }
inline TuneKey tuneKey() const {
return TuneKey(blasStrings.vol_str, "copyKernel", blasStrings.aux_str);
}
inline void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( copyKernel<FloatN, N>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, Y, X, length);
}
void preTune() { ; } // no need to save state for copy kernels
void postTune() { ; } // no need to restore state for copy kernels
long long flops() const { return 0; }
long long bytes() const {
const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
if (X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
if (Y.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
return bytes*length;
}
int tuningIter() const { return 3; }
};
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.Nspin() != 1 && src.Nspin() != 4) errorQuda("nSpin(%d) not supported\n", src.Nspin());
if (dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
if (src.SiteSubset() != dst.SiteSubset())
errorQuda("Spinor fields do not have matching subsets dst=%d src=%d\n",
dst.SiteSubset(), src.SiteSubset());
copy::copyCuda(dst.Even(), src.Even());
copy::copyCuda(dst.Odd(), src.Odd());
return;
}
checkSpinorLength(dst, src);
blasStrings.vol_str = src.VolString();
char tmp[256];
strcpy(tmp, "dst=");
strcat(tmp, dst.AuxString());
strcat(tmp, ",src=");
strcat(tmp, src.AuxString());
blasStrings.aux_str = tmp;
// For a given dst precision, there are two non-trivial possibilities for the
// src precision.
// FIXME: use traits to encapsulate register type for shorts -
// will reduce template type parameters from 3 to 2
blas_bytes += (unsigned long long)src.RealLength()*(src.Precision() + dst.Precision());
if (dst.Precision() == src.Precision()) {
if (src.Bytes() != dst.Bytes()) errorQuda("Precisions match, but bytes do not");
hipMemcpy(dst.V(), src.V(), dst.Bytes(), hipMemcpyDeviceToDevice);
if (dst.Precision() == QUDA_HALF_PRECISION) {
hipMemcpy(dst.Norm(), src.Norm(), dst.NormBytes(), hipMemcpyDeviceToDevice);
blas_bytes += 2*(unsigned long long)dst.RealLength()*sizeof(float);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, float2, double2, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float2, double2, 6, 1>,
Spinor<float4, float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //src.Nspin() == 1
Spinor<float2, float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, double2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, double2, 3, 1>,
Spinor<float2, float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float2, double2, 6, 0, 0> src_tex(src);
Spinor<float4, float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, float4, 6, 1>,
Spinor<float4, float2, double2, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //src.Nspin() ==1
Spinor<float2, float2, double2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, float2, 3, 1>,
Spinor<float2, float2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas_bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, short4, 6, 0, 0> src_tex(src);
Spinor<float4, float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, float4, 6, 1>,
Spinor<float4, float4, short4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin== 1;
Spinor<float2, float2, short2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, float2, 3, 1>,
Spinor<float2, float2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas_bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, short4, 6, 1>,
Spinor<float4, float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin == 1
Spinor<float2, float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, short2, 3, 1>,
Spinor<float2, float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas_bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, float4, short4, 12, 0, 0> src_tex(src);
Spinor<double2, double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, double2, double2, 12, 1>,
Spinor<double2, float4, short4, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin == 1
Spinor<double2, float2, short2, 3, 0, 0> src_tex(src);
Spinor<double2, double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, double2, double2, 3, 1>,
Spinor<double2, float2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas_bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, double2, 12, 0, 0> src_tex(src);
Spinor<double2, double4, short4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, double4, short4, 12, 1>,
Spinor<double2, double2, double2, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin == 1
Spinor<double2, double2, double2, 3, 0, 0> src_tex(src);
Spinor<double2, double2, short2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, double2, short2, 3, 1>,
Spinor<double2, double2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else {
errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
}
checkCudaError();
}
} // namespace copy
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
copy::copyCuda(dst, src);
}
} // namespace quda
|
83778bd825f18afc56fa099c201b6a3f3a4ff1d6.cu
|
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
namespace quda {
cudaStream_t* getBlasStream();
namespace copy {
#include <texture.h>
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
FloatN x[N];
X.load(x, i);
Y.save(x, i);
i += gridSize;
}
}
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {
private:
Input &X;
Output &Y;
const int length;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
virtual bool advanceSharedBytes(TuneParam ¶m) const
{
TuneParam next(param);
advanceBlockDim(next); // to get next blockDim
int nthreads = next.block.x * next.block.y * next.block.z;
param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
return false;
}
public:
CopyCuda(Output &Y, Input &X, int length) : X(X), Y(Y), length(length) { }
virtual ~CopyCuda() { ; }
inline TuneKey tuneKey() const {
return TuneKey(blasStrings.vol_str, "copyKernel", blasStrings.aux_str);
}
inline void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
copyKernel<FloatN, N><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(Y, X, length);
}
void preTune() { ; } // no need to save state for copy kernels
void postTune() { ; } // no need to restore state for copy kernels
long long flops() const { return 0; }
long long bytes() const {
const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
if (X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
if (Y.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
return bytes*length;
}
int tuningIter() const { return 3; }
};
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.Nspin() != 1 && src.Nspin() != 4) errorQuda("nSpin(%d) not supported\n", src.Nspin());
if (dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
if (src.SiteSubset() != dst.SiteSubset())
errorQuda("Spinor fields do not have matching subsets dst=%d src=%d\n",
dst.SiteSubset(), src.SiteSubset());
copy::copyCuda(dst.Even(), src.Even());
copy::copyCuda(dst.Odd(), src.Odd());
return;
}
checkSpinorLength(dst, src);
blasStrings.vol_str = src.VolString();
char tmp[256];
strcpy(tmp, "dst=");
strcat(tmp, dst.AuxString());
strcat(tmp, ",src=");
strcat(tmp, src.AuxString());
blasStrings.aux_str = tmp;
// For a given dst precision, there are two non-trivial possibilities for the
// src precision.
// FIXME: use traits to encapsulate register type for shorts -
// will reduce template type parameters from 3 to 2
blas_bytes += (unsigned long long)src.RealLength()*(src.Precision() + dst.Precision());
if (dst.Precision() == src.Precision()) {
if (src.Bytes() != dst.Bytes()) errorQuda("Precisions match, but bytes do not");
cudaMemcpy(dst.V(), src.V(), dst.Bytes(), cudaMemcpyDeviceToDevice);
if (dst.Precision() == QUDA_HALF_PRECISION) {
cudaMemcpy(dst.Norm(), src.Norm(), dst.NormBytes(), cudaMemcpyDeviceToDevice);
blas_bytes += 2*(unsigned long long)dst.RealLength()*sizeof(float);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, float2, double2, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float2, double2, 6, 1>,
Spinor<float4, float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //src.Nspin() == 1
Spinor<float2, float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, double2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, double2, 3, 1>,
Spinor<float2, float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float2, double2, 6, 0, 0> src_tex(src);
Spinor<float4, float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, float4, 6, 1>,
Spinor<float4, float2, double2, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //src.Nspin() ==1
Spinor<float2, float2, double2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, float2, 3, 1>,
Spinor<float2, float2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas_bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, short4, 6, 0, 0> src_tex(src);
Spinor<float4, float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, float4, 6, 1>,
Spinor<float4, float4, short4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin== 1;
Spinor<float2, float2, short2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, float2, 3, 1>,
Spinor<float2, float2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas_bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, short4, 6, 1>,
Spinor<float4, float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin == 1
Spinor<float2, float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, short2, 3, 1>,
Spinor<float2, float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas_bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, float4, short4, 12, 0, 0> src_tex(src);
Spinor<double2, double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, double2, double2, 12, 1>,
Spinor<double2, float4, short4, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin == 1
Spinor<double2, float2, short2, 3, 0, 0> src_tex(src);
Spinor<double2, double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, double2, double2, 3, 1>,
Spinor<double2, float2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas_bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, double2, 12, 0, 0> src_tex(src);
Spinor<double2, double4, short4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, double4, short4, 12, 1>,
Spinor<double2, double2, double2, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
} else { //nSpin == 1
Spinor<double2, double2, double2, 3, 0, 0> src_tex(src);
Spinor<double2, double2, short2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, double2, short2, 3, 1>,
Spinor<double2, double2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume());
copy.apply(*getBlasStream());
}
} else {
errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
}
checkCudaError();
}
} // namespace copy
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
copy::copyCuda(dst, src);
}
} // namespace quda
|
c3c05892a03bd66cfece7e02604f83a0d59d744f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
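// Power-measurement kernel: 28 out of every 32 lanes in a warp run a dependent
// chain of integer add/sub operations for ITERATIONS rounds to stress the ALUs.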
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
if( ((i%32)<=27) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
|
c3c05892a03bd66cfece7e02604f83a0d59d744f.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
if( ((i%32)<=27) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
|
4aa941b395ccdfc8cbe4757cfd2d16bded25ef60.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "auxiliary.h"
#include <math.h>
#include <stdbool.h>
#define PIXELS 256 // image is PIXELS x PIXELS (here 256x256)
#define filtSigma 0.02
#define patchSize 7
#define patchSigma 3
__device__ void comparison(float *comparison_value,float *Pi, int j, float *G, float *shared_memory);
__device__ void compute_weight(float *w, float *Pi, int j, float *G, float *shared_memory);
__host__ float * gaussian();
__global__ void compute_f_pixel(float *f_image,float *image, int padded_size, float *G);
//! global variables
__device__ const int dev_PIXELS = PIXELS;
__device__ const float dev_filtSigma = (float)filtSigma;
__device__ const int dev_patchSize = patchSize;
__device__ const int dev_padding = patchSize/2;
__host__ float *nonLocalMeans(float *host_image){
/*
##############################
V2 START
##############################
*/
int padding = patchSize/2;
int n = PIXELS * PIXELS;
int padded_size = n + 4*(padding*PIXELS+ padding *padding);
float *G;
hipMallocManaged(&G, patchSize*patchSize*sizeof(float));
if(G == NULL){
printf("Error.\n");
exit(1);
}
float *temp_G = gaussian();
memcpy(G,temp_G,patchSize*patchSize*sizeof(float));
float *image;
hipMallocManaged(&image, padded_size*sizeof(float));
if(image == NULL){
printf("Error.\n");
exit(1);
}
memcpy(image,host_image, padded_size*sizeof(float));
float *f_image;
hipMallocManaged(&f_image, padded_size*sizeof(float));
if(f_image == NULL){
printf("Error.\n");
exit(1);
}
for(int i=0; i<padded_size; i++)
f_image[i]=(float)-1;
int memory_size = patchSize*(PIXELS+2*padding);
hipLaunchKernelGGL(( compute_f_pixel), dim3(PIXELS),dim3(PIXELS), memory_size*sizeof(float), 0, f_image,image, padded_size, G);
hipDeviceSynchronize();
hipFree(G);
free(temp_G);
hipFree(image);
/*
##############################
V2 END
##############################
*/
return f_image;
}
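// One block per image row, one thread per pixel: the block stages a patchSize-row
// window of the padded image in shared memory and slides it down one row at a
// time while accumulating the non-local-means weighted average over all pixels j.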
__global__ void compute_f_pixel(float *f_image,float *image, int padded_size, float *G){
// index i is calculated so that it iterates the original image minus the padding
int i = blockIdx.x*(blockDim.x+2*dev_padding)+(threadIdx.x+dev_padding) + dev_padding*dev_PIXELS+2*dev_padding*dev_padding;
if(i < padded_size){
int row_size = (dev_PIXELS+2*dev_padding);
// Creation of Patch i
float Pi[dev_patchSize*dev_patchSize];
for(int k=0; k<dev_patchSize; k++){
for(int l=0; l<dev_patchSize; l++)
Pi[k*dev_patchSize + l] = image[i +(k-dev_padding)*row_size + l-dev_padding];
}
// shared_memory has the portion of the original image
// that is accessed while doing the calculations for each row
extern __shared__ float shared_memory[];
// Initialization of shared memory
// each thread is responsible for its respective column
for(int s=0; s<dev_patchSize; s++)
shared_memory[ (threadIdx.x +dev_padding) + s*row_size] = image[(threadIdx.x +dev_padding) + s*row_size];
// the first thread is responsible for the padding of "-1" of shared memory
if(threadIdx.x == 0){
for(int c=0; c<dev_padding; c++)
for(int s=0; s<dev_patchSize; s++)
shared_memory[c+s*row_size] = -1;
for(int c=dev_padding+dev_PIXELS; c<row_size; c++)
for(int s=0; s<dev_patchSize; s++)
shared_memory[c+s*row_size] = -1;
}
__syncthreads();
f_image[i] = 0;
float Z = 0;
float w;
for(int r=dev_padding; r<dev_PIXELS+dev_padding; r++){
for(int j=dev_padding; j<(dev_PIXELS+dev_padding); j++){
compute_weight(&w, Pi, j, G, shared_memory);
Z += w;
f_image[i] += w * shared_memory[dev_padding*row_size+j];
}
// Update the shared memory so that it contains the pixels for the next row (slide down the shared memory)
__syncthreads();
for(int s=0; s<dev_patchSize-1; s++)
shared_memory[ (threadIdx.x +dev_padding) + s*row_size] = shared_memory[(threadIdx.x +dev_padding) + (s+1)*row_size];
int offset = (r-dev_padding + 1)*row_size;
shared_memory[(threadIdx.x +dev_padding) + (dev_patchSize-1)*row_size] = image[offset+(threadIdx.x +dev_padding) + (dev_patchSize-1)*row_size];
__syncthreads();
}
f_image[i] = f_image[i] / Z;
}
}
__host__ int main(){
// Convert txt to C array
int padding = patchSize/2;
float *host_image = read_txt(PIXELS, padding);
float *f_image;
hipMallocManaged(&f_image,0);
struct timespec tic;
clock_gettime( CLOCK_MONOTONIC, &tic);
f_image = nonLocalMeans(host_image);
struct timespec toc;
clock_gettime( CLOCK_MONOTONIC, &toc);
printf("\n ******************************\n V2 duration = %f sec\n ******************************\n\n",time_spent(tic, toc));
// Convert C array to txt
int padded_size = PIXELS*PIXELS + 4*(padding*PIXELS+ padding *padding);
FILE *f = fopen("filtered_image_V2.txt", "w");
int counter = 0;
for(int i=(PIXELS*padding+2*padding*padding); i<padded_size - (PIXELS*padding+2*padding*padding); i++){
if(f_image[i] == (float)-1)
continue;
fprintf(f, "%f ", f_image[i]);
counter++;
if(counter == PIXELS){
counter = 0;
fprintf(f, "\n");
}
}
fclose(f);
free(host_image);
hipFree(f_image);
return 0;
}
//! Compares Patch i and Patch j
__device__ void comparison(float *comparison_value,float *Pi, int j, float *G, float *shared_memory){
j = j + dev_padding*(dev_PIXELS+2*dev_padding);
for(int k=0; k<dev_patchSize; k++){
for(int l=0; l<dev_patchSize; l++){
if(Pi[k*dev_patchSize+l] != (float)-1 && shared_memory[j+(k-dev_padding)*(dev_PIXELS+2*dev_padding) + l-dev_padding] != (float)-1){
float diff = Pi[k*dev_patchSize+l] - shared_memory[j+(k-dev_padding)*(dev_PIXELS+2*dev_padding) + l-dev_padding];
*comparison_value += G[k*dev_patchSize+l] * diff * diff;
}
}
}
}
//! Computes the w(i,j)
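//! w(i,j) = exp( -sum_k G_k * (Pi_k - Pj_k)^2 / filtSigma^2 ), a Gaussian-weighted patch distance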
__device__ void compute_weight(float *w, float *Pi, int j, float *G, float *shared_memory){
float comparison_value = 0;
comparison(&comparison_value, Pi, j, G, shared_memory);
*w = (float)(exp(-comparison_value/(dev_filtSigma*dev_filtSigma)));
}
//! Compute the gaussian filter
__host__ float * gaussian(){
float *G = (float *)malloc(patchSize*patchSize*sizeof(float));
if(G == NULL){
printf("Error.\n");
exit(1);
}
int bound = patchSize/2;
for(int x=-bound; x<=bound; x++){ // if patchSize=5 then x = -2,-1,0,+1,+2
for(int y=-bound; y<=bound; y++){
float G_temp = exp(-(float)(x*x+y*y)/(float)(2*patchSigma*patchSigma))/(float)(2*M_PI*patchSigma*patchSigma); // 2D Gaussian filter
int i = (x+bound)*patchSize +(y+bound); // i = 0*5+{0,1,2,3,4}, 1*5+{0,1,2,3,4},..., 4*5+{0,1,2,3,4}
G[i] = G_temp;
}
}
return G;
}
|
4aa941b395ccdfc8cbe4757cfd2d16bded25ef60.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "auxiliary.h"
#include <math.h>
#include <stdbool.h>
#define PIXELS 256 // image is PIXELS x PIXELS (here 256x256)
#define filtSigma 0.02
#define patchSize 7
#define patchSigma 3
__device__ void comparison(float *comparison_value,float *Pi, int j, float *G, float *shared_memory);
__device__ void compute_weight(float *w, float *Pi, int j, float *G, float *shared_memory);
__host__ float * gaussian();
__global__ void compute_f_pixel(float *f_image,float *image, int padded_size, float *G);
//! global variables
__device__ const int dev_PIXELS = PIXELS;
__device__ const float dev_filtSigma = (float)filtSigma;
__device__ const int dev_patchSize = patchSize;
__device__ const int dev_padding = patchSize/2;
__host__ float *nonLocalMeans(float *host_image){
/*
##############################
V2 START
##############################
*/
int padding = patchSize/2;
int n = PIXELS * PIXELS;
int padded_size = n + 4*(padding*PIXELS+ padding *padding);
float *G;
cudaMallocManaged(&G, patchSize*patchSize*sizeof(float));
if(G == NULL){
printf("Error.\n");
exit(1);
}
float *temp_G = gaussian();
memcpy(G,temp_G,patchSize*patchSize*sizeof(float));
float *image;
cudaMallocManaged(&image, padded_size*sizeof(float));
if(image == NULL){
printf("Error.\n");
exit(1);
}
memcpy(image,host_image, padded_size*sizeof(float));
float *f_image;
cudaMallocManaged(&f_image, padded_size*sizeof(float));
if(f_image == NULL){
printf("Error.\n");
exit(1);
}
for(int i=0; i<padded_size; i++)
f_image[i]=(float)-1;
int memory_size = patchSize*(PIXELS+2*padding);
compute_f_pixel<<<PIXELS,PIXELS, memory_size*sizeof(float)>>>(f_image,image, padded_size, G);
cudaDeviceSynchronize();
cudaFree(G);
free(temp_G);
cudaFree(image);
/*
##############################
V2 END
##############################
*/
return f_image;
}
__global__ void compute_f_pixel(float *f_image,float *image, int padded_size, float *G){
// index i is calculated so that it iterates the original image minus the padding
int i = blockIdx.x*(blockDim.x+2*dev_padding)+(threadIdx.x+dev_padding) + dev_padding*dev_PIXELS+2*dev_padding*dev_padding;
if(i < padded_size){
int row_size = (dev_PIXELS+2*dev_padding);
// Creation of Patch i
float Pi[dev_patchSize*dev_patchSize];
for(int k=0; k<dev_patchSize; k++){
for(int l=0; l<dev_patchSize; l++)
Pi[k*dev_patchSize + l] = image[i +(k-dev_padding)*row_size + l-dev_padding];
}
// shared_memory has the portion of the original image
// that is accessed while doing the calculations for each row
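        // (dev_patchSize rows of row_size = dev_PIXELS + 2*dev_padding floats each, matching the
        // patchSize*(PIXELS+2*padding) floats of dynamic shared memory requested at launch)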
extern __shared__ float shared_memory[];
// Initialization of shared memory
// each thread is responsible for its respective column
for(int s=0; s<dev_patchSize; s++)
shared_memory[ (threadIdx.x +dev_padding) + s*row_size] = image[(threadIdx.x +dev_padding) + s*row_size];
// the first thread is responsible for the padding of "-1" of shared memory
if(threadIdx.x == 0){
for(int c=0; c<dev_padding; c++)
for(int s=0; s<dev_patchSize; s++)
shared_memory[c+s*row_size] = -1;
for(int c=dev_padding+dev_PIXELS; c<row_size; c++)
for(int s=0; s<dev_patchSize; s++)
shared_memory[c+s*row_size] = -1;
}
__syncthreads();
f_image[i] = 0;
float Z = 0;
float w;
for(int r=dev_padding; r<dev_PIXELS+dev_padding; r++){
for(int j=dev_padding; j<(dev_PIXELS+dev_padding); j++){
compute_weight(&w, Pi, j, G, shared_memory);
Z += w;
f_image[i] += w * shared_memory[dev_padding*row_size+j];
}
// Update the shared memory so that it contains the pixels for the next row (slide down the shared memory)
__syncthreads();
for(int s=0; s<dev_patchSize-1; s++)
shared_memory[ (threadIdx.x +dev_padding) + s*row_size] = shared_memory[(threadIdx.x +dev_padding) + (s+1)*row_size];
int offset = (r-dev_padding + 1)*row_size;
shared_memory[(threadIdx.x +dev_padding) + (dev_patchSize-1)*row_size] = image[offset+(threadIdx.x +dev_padding) + (dev_patchSize-1)*row_size];
__syncthreads();
}
f_image[i] = f_image[i] / Z;
}
}
__host__ int main(){
// Convert txt to C array
int padding = patchSize/2;
float *host_image = read_txt(PIXELS, padding);
float *f_image;
cudaMallocManaged(&f_image,0);
struct timespec tic;
clock_gettime( CLOCK_MONOTONIC, &tic);
f_image = nonLocalMeans(host_image);
struct timespec toc;
clock_gettime( CLOCK_MONOTONIC, &toc);
printf("\n ******************************\n V2 duration = %f sec\n ******************************\n\n",time_spent(tic, toc));
// Convert C array to txt
int padded_size = PIXELS*PIXELS + 4*(padding*PIXELS+ padding *padding);
FILE *f = fopen("filtered_image_V2.txt", "w");
int counter = 0;
for(int i=(PIXELS*padding+2*padding*padding); i<padded_size - (PIXELS*padding+2*padding*padding); i++){
if(f_image[i] == (float)-1)
continue;
fprintf(f, "%f ", f_image[i]);
counter++;
if(counter == PIXELS){
counter = 0;
fprintf(f, "\n");
}
}
fclose(f);
free(host_image);
cudaFree(f_image);
return 0;
}
//! Compares Patch i and Patch j
__device__ void comparison(float *comparison_value,float *Pi, int j, float *G, float *shared_memory){
j = j + dev_padding*(dev_PIXELS+2*dev_padding);
for(int k=0; k<dev_patchSize; k++){
for(int l=0; l<dev_patchSize; l++){
if(Pi[k*dev_patchSize+l] != (float)-1 && shared_memory[j+(k-dev_padding)*(dev_PIXELS+2*dev_padding) + l-dev_padding] != (float)-1){
float diff = Pi[k*dev_patchSize+l] - shared_memory[j+(k-dev_padding)*(dev_PIXELS+2*dev_padding) + l-dev_padding];
*comparison_value += G[k*dev_patchSize+l] * diff * diff;
}
}
}
}
//! Computes the w(i,j)
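//! i.e. w(i,j) = exp( -sum_k G[k]*(Pi[k]-Pj[k])^2 / filtSigma^2 ), a Gaussian-weighted
//! patch similarity; padded pixels marked -1 are skipped inside comparison()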
__device__ void compute_weight(float *w, float *Pi, int j, float *G, float *shared_memory){
float comparison_value = 0;
comparison(&comparison_value, Pi, j, G, shared_memory);
*w = (float)(exp(-comparison_value/(dev_filtSigma*dev_filtSigma)));
}
//! Compute the gaussian filter
__host__ float * gaussian(){
float *G = (float *)malloc(patchSize*patchSize*sizeof(float));
if(G == NULL){
printf("Error.\n");
exit(1);
}
int bound = patchSize/2;
for(int x=-bound; x<=bound; x++){ // if patchSize=5 then x = -2,-1,0,+1,+2
for(int y=-bound; y<=bound; y++){
float G_temp = exp(-(float)(x*x+y*y)/(float)(2*patchSigma*patchSigma))/(float)(2*M_PI*patchSigma*patchSigma); // 2D Gaussian filter
int i = (x+bound)*patchSize +(y+bound); // i = 0*5+{0,1,2,3,4}, 1*5+{0,1,2,3,4},..., 4*5+{0,1,2,3,4}
G[i] = G_temp;
}
}
return G;
}
|
80a9137c8c4181c9abe2b87f966528db50229309.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include "lodepng.h"
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
// Kernel definition
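// Each thread inverts a contiguous chunk of `offset` bytes, clamped to the image size;
// bitwise NOT on an unsigned char channel value is equivalent to 255 - value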
__global__ void invertPicture(unsigned char *d_image, unsigned int size, unsigned int offset)
{
unsigned int tid = threadIdx.x;
unsigned int gtid = tid + blockDim.x*blockIdx.x;
for (int i = gtid*offset; i < MIN(gtid*offset+offset, size); i++)
{
d_image[i] = ~d_image[i];
}
}
unsigned int round_div(unsigned int dividend, unsigned int divisor)
{
return (dividend + (divisor / 2)) / divisor;
}
int main(int argc, char ** argv)
{
size_t pngsize;
unsigned char *png;
const char * filename = "lenna512x512_inv.png";
/* Read in the image */
lodepng_load_file(&png, &pngsize, filename);
unsigned char *h_image, *d_image;
unsigned int width, height;
/* Decode it into a RGB 8-bit per channel vector */
unsigned int error = lodepng_decode24(&h_image, &width, &height, png, pngsize);
/* Check if read and decode of .png went well */
if (error != 0)
{
std::cout << "error " << error << ": " << lodepng_error_text(error) << std::endl;
}
//float *h_x, *d_x; // h=host, d=device
unsigned int n_blocks = 1<<5,
n_threads_per_block = 1<<9, // 2^9 = 512
n_size = n_blocks * n_threads_per_block,
size = width*height*3,
offset = round_div(size, n_size);
hipMalloc((void **) &d_image, size*sizeof(unsigned char));
hipMemcpy(d_image, h_image, size*sizeof(unsigned char), hipMemcpyHostToDevice);
// Kernel invocation
hipLaunchKernelGGL(( invertPicture), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, size, offset);
hipDeviceSynchronize(); // Wait for invertPicture to finish on CUDA
hipMemcpy(h_image, d_image, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
/* Save the result to a new .png file */
lodepng_encode24_file("lenna512x512_orig.png", h_image, width, height);
// Cleanup
hipFree(d_image);
free(h_image);
return 0;
}
|
80a9137c8c4181c9abe2b87f966528db50229309.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include "lodepng.h"
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
// Kernel definition
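// Each thread inverts a contiguous chunk of `offset` bytes, clamped to the image size;
// bitwise NOT on an unsigned char channel value is equivalent to 255 - value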
__global__ void invertPicture(unsigned char *d_image, unsigned int size, unsigned int offset)
{
unsigned int tid = threadIdx.x;
unsigned int gtid = tid + blockDim.x*blockIdx.x;
for (int i = gtid*offset; i < MIN(gtid*offset+offset, size); i++)
{
d_image[i] = ~d_image[i];
}
}
unsigned int round_div(unsigned int dividend, unsigned int divisor)
{
return (dividend + (divisor / 2)) / divisor;
}
int main(int argc, char ** argv)
{
size_t pngsize;
unsigned char *png;
const char * filename = "lenna512x512_inv.png";
/* Read in the image */
lodepng_load_file(&png, &pngsize, filename);
unsigned char *h_image, *d_image;
unsigned int width, height;
/* Decode it into a RGB 8-bit per channel vector */
unsigned int error = lodepng_decode24(&h_image, &width, &height, png, pngsize);
/* Check if read and decode of .png went well */
if (error != 0)
{
std::cout << "error " << error << ": " << lodepng_error_text(error) << std::endl;
}
//float *h_x, *d_x; // h=host, d=device
unsigned int n_blocks = 1<<5,
n_threads_per_block = 1<<9, // 2^9 = 512
n_size = n_blocks * n_threads_per_block,
size = width*height*3,
offset = round_div(size, n_size);
cudaMalloc((void **) &d_image, size*sizeof(unsigned char));
cudaMemcpy(d_image, h_image, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
// Kernel invocation
invertPicture<<<n_blocks, n_threads_per_block>>>(d_image, size, offset);
cudaThreadSynchronize(); // Wait for invertPicture to finish on CUDA
cudaMemcpy(h_image, d_image, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
/* Save the result to a new .png file */
lodepng_encode24_file("lenna512x512_orig.png", h_image, width, height);
// Cleanup
cudaFree(d_image);
free(h_image);
return 0;
}
|
87c36d522f6e9846cb042164bbc0d844ca51c3cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "llenar_vecinos.h"
int main(int argc, char** argv) {
int row=6;
int colum=6;
int node=row*colum;
int nvec=9;
int nvel=9;
int *dev_vecinos;
double *dev_velocidad;
double *dev_suma;
double *hst_velocidad;
int *hst_vecinos;
// host allocations
hst_velocidad = (double*)malloc( node*nvel*sizeof(double) );
hst_vecinos = (int*)malloc( node*nvec*sizeof(int) );
// device allocations
hipMalloc( (void**)&dev_velocidad, node*nvel*sizeof(double) );
hipMalloc( (void**)&dev_suma, node*nvel*sizeof(double) );
hipMalloc( (void**)&dev_vecinos, node*nvec*sizeof(int) );
// data initialization
llenarVecinos(&hst_vecinos, row, colum);
// free memory
hipFree( dev_vecinos );
hipFree( dev_velocidad );
hipFree( dev_suma );
free(hst_velocidad);
free(hst_vecinos);
return 0;
}
|
87c36d522f6e9846cb042164bbc0d844ca51c3cb.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include "llenar_vecinos.h"
int main(int argc, char** argv) {
int row=6;
int colum=6;
int node=row*colum;
int nvec=9;
int nvel=9;
int *dev_vecinos;
double *dev_velocidad;
double *dev_suma;
double *hst_velocidad;
int *hst_vecinos;
// host allocations
hst_velocidad = (double*)malloc( node*nvel*sizeof(double) );
hst_vecinos = (int*)malloc( node*nvec*sizeof(int) );
// device allocations
cudaMalloc( (void**)&dev_velocidad, node*nvel*sizeof(double) );
cudaMalloc( (void**)&dev_suma, node*nvel*sizeof(double) );
cudaMalloc( (void**)&dev_vecinos, node*nvec*sizeof(int) );
// data initialization
llenarVecinos(&hst_vecinos, row, colum);
// free memory
cudaFree( dev_vecinos );
cudaFree( dev_velocidad );
cudaFree( dev_suma );
free(hst_velocidad);
free(hst_vecinos);
return 0;
}
|
0a6b7620c273168d8b41517c0c37212dfe6460c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <utility>
#include <vector>
#include <device_launch_parameters.h>
#include "caffe/layers/batch_reindex_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
__global__ void BRForward(const int count, const int inner_dim, const Dtype* in,
const Dtype* permut, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
int in_n = static_cast<int>(permut[n]);
out[index] = in[in_n * (inner_dim) + index % (inner_dim)];
}
}
template <typename Ftype, typename Btype>
void BatchReindexLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
check_batch_reindex(bottom[0]->shape(0), bottom[1]->count(),
bottom[1]->cpu_data<Ftype>());
if (top[0]->count() == 0) {
return;
}
int threads = top[0]->count();
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BRForward<Ftype>), dim3(CAFFE_GET_BLOCKS(threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
top[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(),
top[0]->mutable_gpu_data<Ftype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void BRBackward(const int count, const int inner_dim,
const Dtype* in, const int* top_indexes,
const int* begins, const int* counts,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / inner_dim;
int nr = index % inner_dim;
Dtype oi = Dtype(0.);
int lower = begins[n];
int upper = lower + counts[n];
for (int i = lower; i < upper; ++i) {
oi += in[top_indexes[i] * inner_dim + nr];
}
out[index] = oi;
}
}
template <typename Ftype, typename Btype>
void BatchReindexLayer<Ftype, Btype>::Backward_gpu(
const vector<Blob*>& top, const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
CHECK(!propagate_down[1]) << "Cannot backprop to index.";
if (!propagate_down[0]) {
return;
}
vector<std::pair<int, int> > mapping;
const Btype* perm = bottom[1]->cpu_data<Btype>();
for (int i = 0; i < bottom[1]->count(); ++i) {
mapping.emplace_back(make_pair(static_cast<int>(perm[i]), i));
}
std::sort(mapping.begin(), mapping.end(), pair_sort_first());
// Each element of the bottom diff is potentially the sum of many top diffs.
// However, we'd like each CUDA thread to handle exactly one output. Hence,
// we first pre-compute a list of lists of indices that need to be summed for
// each output. `top_indexes` holds the data of this list of lists. The
// k'th element of `begins` points to the location in `top_indexes` where the
// list for the k'th example begin, and the k'th element of `counts` is the
// length of that list.
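  // Illustrative example (not from the layer itself): with permut = [2, 0, 2],
  // sorting the (source, top) pairs gives top_indexes = [1, 0, 2],
  // begins = [0, -1, 1] and counts = [1, 0, 2]: bottom row 0 sums top row 1,
  // bottom row 1 receives no gradient, and bottom row 2 sums top rows 0 and 2.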
vector<int> shape;
shape.push_back(bottom[1]->count());
TBlob<int> top_indexes(shape);
shape[0] = bottom[0]->shape(0);
TBlob<int> counts(shape);
TBlob<int> begins(shape);
int* t_i_data = top_indexes.mutable_cpu_data();
int* c_data = counts.mutable_cpu_data();
int* b_data = begins.mutable_cpu_data();
caffe_set(begins.count(), -1, b_data);
caffe_set(counts.count(), 0, c_data);
for (int i = 0; i < mapping.size(); ++i) {
t_i_data[i] = mapping[i].second;
if (b_data[mapping[i].first] == -1) {
b_data[mapping[i].first] = i;
}
c_data[mapping[i].first] += 1;
}
int threads = bottom[0]->count();
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BRBackward<Btype>), dim3(CAFFE_GET_BLOCKS(threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
top[0]->gpu_diff<Btype>(), top_indexes.gpu_data(), begins.gpu_data(),
counts.gpu_data(), bottom[0]->mutable_gpu_diff<Btype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(BatchReindexLayer);
} // namespace caffe
|
0a6b7620c273168d8b41517c0c37212dfe6460c5.cu
|
#include <algorithm>
#include <utility>
#include <vector>
#include <device_launch_parameters.h>
#include "caffe/layers/batch_reindex_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
__global__ void BRForward(const int count, const int inner_dim, const Dtype* in,
const Dtype* permut, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
int in_n = static_cast<int>(permut[n]);
out[index] = in[in_n * (inner_dim) + index % (inner_dim)];
}
}
template <typename Ftype, typename Btype>
void BatchReindexLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
check_batch_reindex(bottom[0]->shape(0), bottom[1]->count(),
bottom[1]->cpu_data<Ftype>());
if (top[0]->count() == 0) {
return;
}
int threads = top[0]->count();
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
BRForward<Ftype><<<CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
top[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(),
top[0]->mutable_gpu_data<Ftype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void BRBackward(const int count, const int inner_dim,
const Dtype* in, const int* top_indexes,
const int* begins, const int* counts,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / inner_dim;
int nr = index % inner_dim;
Dtype oi = Dtype(0.);
int lower = begins[n];
int upper = lower + counts[n];
for (int i = lower; i < upper; ++i) {
oi += in[top_indexes[i] * inner_dim + nr];
}
out[index] = oi;
}
}
template <typename Ftype, typename Btype>
void BatchReindexLayer<Ftype, Btype>::Backward_gpu(
const vector<Blob*>& top, const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
CHECK(!propagate_down[1]) << "Cannot backprop to index.";
if (!propagate_down[0]) {
return;
}
vector<std::pair<int, int> > mapping;
const Btype* perm = bottom[1]->cpu_data<Btype>();
for (int i = 0; i < bottom[1]->count(); ++i) {
mapping.emplace_back(make_pair(static_cast<int>(perm[i]), i));
}
std::sort(mapping.begin(), mapping.end(), pair_sort_first());
// Each element of the bottom diff is potentially the sum of many top diffs.
// However, we'd like each CUDA thread to handle exactly one output. Hence,
// we first pre-compute a list of lists of indices that need to be summed for
// each output. `top_indexes` holds the data of this list of lists. The
// k'th element of `begins` points to the location in `top_indexes` where the
// list for the k'th example begin, and the k'th element of `counts` is the
// length of that list.
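  // Illustrative example (not from the layer itself): with permut = [2, 0, 2],
  // sorting the (source, top) pairs gives top_indexes = [1, 0, 2],
  // begins = [0, -1, 1] and counts = [1, 0, 2]: bottom row 0 sums top row 1,
  // bottom row 1 receives no gradient, and bottom row 2 sums top rows 0 and 2.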
vector<int> shape;
shape.push_back(bottom[1]->count());
TBlob<int> top_indexes(shape);
shape[0] = bottom[0]->shape(0);
TBlob<int> counts(shape);
TBlob<int> begins(shape);
int* t_i_data = top_indexes.mutable_cpu_data();
int* c_data = counts.mutable_cpu_data();
int* b_data = begins.mutable_cpu_data();
caffe_set(begins.count(), -1, b_data);
caffe_set(counts.count(), 0, c_data);
for (int i = 0; i < mapping.size(); ++i) {
t_i_data[i] = mapping[i].second;
if (b_data[mapping[i].first] == -1) {
b_data[mapping[i].first] = i;
}
c_data[mapping[i].first] += 1;
}
int threads = bottom[0]->count();
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
BRBackward<Btype><<<CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
top[0]->gpu_diff<Btype>(), top_indexes.gpu_data(), begins.gpu_data(),
counts.gpu_data(), bottom[0]->mutable_gpu_diff<Btype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(BatchReindexLayer);
} // namespace caffe
|
70abd249a91577e10a178c89aa59dffb03fc82f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <torch/extension.h>
#include <THH/THHAtomics.cuh>
const int BATCH = 512;
template <typename scalar_t>
__global__ void NmDistanceKernel(int b,int n,int c,const scalar_t * xyz,int m,const scalar_t * xyz2,scalar_t * result,int * result_i){
// const int batch=512;
extern __shared__ __align__(sizeof(scalar_t)) unsigned char my_smem[];
scalar_t *buf = reinterpret_cast<scalar_t *>(my_smem);
// SharedMemory<scalar_t> smem;
// scalar_t* buf = smem.getPointer();
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// process xyz2 points by chunks
// each chunk:
// 1. sequentially fill shared buffer with xyz2
// 2. for each point in xyz1, find NN from the buffer
for (int k2=0;k2<m;k2+=BATCH){
int end_k=min(m,k2+BATCH)-k2;
for (int j=threadIdx.x;j<end_k*c;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*c+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
int best_i=0;
scalar_t best=0;
// int end_ka=end_k-(end_k&3);
for (int k=0;k<end_k;k++){
scalar_t d = 0;
for (int _c = 0; _c < c; _c++){
scalar_t tmp = buf[k*c+_c]-xyz[(i*n+j)*c+_c];
d += (tmp*tmp);
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// xyz (b,n,3)
// xyz2 (b,m,3)
// label (b,n,1)
// label2 (b,m,1)
template <typename scalar_t>
__global__ void LabeledNmDistanceKernel(int b,int n,int c, const scalar_t * xyz, const scalar_t *label,
int m, const scalar_t * xyz2, const scalar_t *label2,
scalar_t * result, int * result_i){
// const int BATCH=512;
extern __shared__ __align__(sizeof(scalar_t)) unsigned char my_smem[];
scalar_t *buf = reinterpret_cast<scalar_t *>(my_smem);
// loop over BATCH of xyz1
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// process xyz2 points by chunks
// each chunk:
// 1. sequentially fill shared buffer with xyz2 (into first BATCH*c) and label2 (into BATCH*1)
// 2. for each point in xyz1, find NN from the buffer
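    // shared buf layout: [ xyz2 chunk : BATCH*c scalars | label2 chunk : BATCH scalars ],
    // which is why the launch reserves BATCH*(c+1)*sizeof(scalar_t) bytes of dynamic shared memory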
for (int k2=0;k2<m;k2+=BATCH){
// idx of the last point in xyz2 to fill the batch
int end_k=min(m,k2+BATCH)-k2;
int begin_of_label = BATCH*c;
for (int j=threadIdx.x;j<end_k*c;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*c+j];
}
for (int j=threadIdx.x;j<end_k;j+=blockDim.x){
buf[j+begin_of_label]=label2[(i*m+k2)+j];
}
__syncthreads();
// loop over current BATCH in xyz1
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
const scalar_t l1 = label[(i*n+j)];
int best_i=-1;
// TODO initialize with max scalar_t
scalar_t best=scalar_t(1e10);
// int end_ka=end_k-(end_k&3);
for (int k=0;k<end_k;k++){
const scalar_t l2 = buf[begin_of_label+k];
// const scalar_t l2 = label2[(i*m+k+k2)];
if (l1 == l2) {
scalar_t d = 0;
for (int _c = 0; _c < c; _c++){
scalar_t tmp = buf[k*c+_c]-xyz[(i*n+j)*c+_c];
d += (tmp*tmp);
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
// after processing all xyz2 and xyz1 for this batch
// go over result_i of xyz1, check if index < 0, change distance to 0
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
if (result_i[((i*n+j))] < 0)
result[(i*n+j)] = 0;
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor& xyz1, at::Tensor& xyz2, at::Tensor& dist1, at::Tensor& dist2, at::Tensor& idx1, at::Tensor& idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto c = xyz1.size(2); //point dimension
CHECK_EQ(xyz2.size(2), c);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
xyz1.scalar_type(), "NmDistanceKernel", ([&] {
hipLaunchKernelGGL(( NmDistanceKernel<scalar_t>), dim3(dim3(batch_size,16,1)),dim3(BATCH),BATCH*c*sizeof(scalar_t), 0, batch_size, n, c, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>());
hipLaunchKernelGGL(( NmDistanceKernel<scalar_t>), dim3(dim3(batch_size,16,1)),dim3(BATCH),BATCH*c*sizeof(scalar_t), 0, batch_size, m, c, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>());
})
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
int labeled_chamfer_cuda_forward(const at::Tensor& xyz1, const at::Tensor& xyz2, const at::Tensor& label1, const at::Tensor& label2,
at::Tensor& dist1, at::Tensor& dist2, at::Tensor& idx1, at::Tensor& idx2){
const int batch_size = xyz1.size(0);
const int n = xyz1.size(1); //num_points point cloud A
const int m = xyz2.size(1); //num_points point cloud B
const int c = xyz1.size(2); //point dimension
CHECK_EQ(xyz2.size(2), c);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
xyz1.scalar_type(), "NmDistanceKernel", ([&] {
hipLaunchKernelGGL(( LabeledNmDistanceKernel<scalar_t>), dim3(dim3(batch_size,16,1)),dim3(BATCH),BATCH*(c+1)*sizeof(scalar_t), 0, batch_size, n, c, xyz1.data_ptr<scalar_t>(), label1.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), m,
xyz2.data_ptr<scalar_t>(), label2.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>());
hipLaunchKernelGGL(( LabeledNmDistanceKernel<scalar_t>), dim3(dim3(batch_size,16,1)),dim3(BATCH),BATCH*(c+1)*sizeof(scalar_t), 0, batch_size, m, c, xyz2.data_ptr<scalar_t>(), label2.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), n,
xyz1.data_ptr<scalar_t>(), label1.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>());
})
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
template <typename scalar_t>
__global__ void NmDistanceGradKernel(int b,int n,int c,const scalar_t * xyz1,int m,const scalar_t * xyz2,
const scalar_t * grad_dist1, const int * idx1, scalar_t * grad_xyz1, scalar_t * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
int j2=idx1[i*n+j];
// ignore negative indices (for labeled_nmdistance points can have no closest neighbors!)
if (j2 < 0) continue;
scalar_t g=grad_dist1[i*n+j]*2;
for (int _c = 0; _c < c; _c++)
{
scalar_t xyz_g = g*(xyz1[(i*n+j)*c+_c]-xyz2[(i*m+j2)*c+_c]);
atomicAdd(&(grad_xyz1[(i*n+j)*c+_c]), xyz_g);
atomicAdd(&(grad_xyz2[(i*m+j2)*c+_c]),-xyz_g);
}
}
}
}
// xyz1 (b,n,c)
// xyz2 (b,m,c)
// gradxyz1 (b,n,3)
// gradxyz2 (b,m,3)
// graddist1 (b,n)
// graddist2 (b,m)
// idx1 (b,n) idx for the closest neighbor of each point in xyz1
// idx2 (b,m) ... xyz2
int chamfer_cuda_backward(at::Tensor& xyz1, at::Tensor& xyz2, at::Tensor& gradxyz1, at::Tensor& gradxyz2, at::Tensor& graddist1, at::Tensor& graddist2, at::Tensor& idx1, at::Tensor& idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto c = xyz1.size(2); //point dimension
// set to zero
gradxyz1.zero_();
gradxyz2.zero_();
CHECK_EQ(xyz2.size(2), c);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
xyz1.scalar_type(), "NmDistanceGradKernel", ([&] {
hipLaunchKernelGGL(( NmDistanceGradKernel<scalar_t>), dim3(dim3(batch_size,16,1)),dim3(256), 0, 0, batch_size,n,c,xyz1.data_ptr<scalar_t>(),m,xyz2.data_ptr<scalar_t>(),graddist1.data_ptr<scalar_t>(),idx1.data_ptr<int>(),gradxyz1.data_ptr<scalar_t>(),gradxyz2.data_ptr<scalar_t>());
hipLaunchKernelGGL(( NmDistanceGradKernel<scalar_t>), dim3(dim3(batch_size,16,1)),dim3(256), 0, 0, batch_size,m,c,xyz2.data_ptr<scalar_t>(),n,xyz1.data_ptr<scalar_t>(),graddist2.data_ptr<scalar_t>(),idx2.data_ptr<int>(),gradxyz2.data_ptr<scalar_t>(),gradxyz1.data_ptr<scalar_t>());
})
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
|
70abd249a91577e10a178c89aa59dffb03fc82f2.cu
|
#include <stdio.h>
#include <torch/extension.h>
#include <THC/THCAtomics.cuh>
const int BATCH = 512;
template <typename scalar_t>
__global__ void NmDistanceKernel(int b,int n,int c,const scalar_t * xyz,int m,const scalar_t * xyz2,scalar_t * result,int * result_i){
// const int batch=512;
extern __shared__ __align__(sizeof(scalar_t)) unsigned char my_smem[];
scalar_t *buf = reinterpret_cast<scalar_t *>(my_smem);
// SharedMemory<scalar_t> smem;
// scalar_t* buf = smem.getPointer();
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// process xyz2 points by chunks
// each chunk:
// 1. sequentially fill shared buffer with xyz2
// 2. for each point in xyz1, find NN from the buffer
for (int k2=0;k2<m;k2+=BATCH){
int end_k=min(m,k2+BATCH)-k2;
for (int j=threadIdx.x;j<end_k*c;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*c+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
int best_i=0;
scalar_t best=0;
// int end_ka=end_k-(end_k&3);
for (int k=0;k<end_k;k++){
scalar_t d = 0;
for (int _c = 0; _c < c; _c++){
scalar_t tmp = buf[k*c+_c]-xyz[(i*n+j)*c+_c];
d += (tmp*tmp);
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// xyz (b,n,3)
// xyz2 (b,m,3)
// label (b,n,1)
// label2 (b,m,1)
template <typename scalar_t>
__global__ void LabeledNmDistanceKernel(int b,int n,int c, const scalar_t * xyz, const scalar_t *label,
int m, const scalar_t * xyz2, const scalar_t *label2,
scalar_t * result, int * result_i){
// const int BATCH=512;
extern __shared__ __align__(sizeof(scalar_t)) unsigned char my_smem[];
scalar_t *buf = reinterpret_cast<scalar_t *>(my_smem);
// loop over BATCH of xyz1
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// process xyz2 points by chunks
// each chunk:
// 1. sequentially fill shared buffer with xyz2 (into first BATCH*c) and label2 (into BATCH*1)
// 2. for each point in xyz1, find NN from the buffer
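    // shared buf layout: [ xyz2 chunk : BATCH*c scalars | label2 chunk : BATCH scalars ],
    // which is why the launch reserves BATCH*(c+1)*sizeof(scalar_t) bytes of dynamic shared memory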
for (int k2=0;k2<m;k2+=BATCH){
// idx of the last point in xyz2 to fill the batch
int end_k=min(m,k2+BATCH)-k2;
int begin_of_label = BATCH*c;
for (int j=threadIdx.x;j<end_k*c;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*c+j];
}
for (int j=threadIdx.x;j<end_k;j+=blockDim.x){
buf[j+begin_of_label]=label2[(i*m+k2)+j];
}
__syncthreads();
// loop over current BATCH in xyz1
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
const scalar_t l1 = label[(i*n+j)];
int best_i=-1;
// TODO initialize with max scalar_t
scalar_t best=scalar_t(1e10);
// int end_ka=end_k-(end_k&3);
for (int k=0;k<end_k;k++){
const scalar_t l2 = buf[begin_of_label+k];
// const scalar_t l2 = label2[(i*m+k+k2)];
if (l1 == l2) {
scalar_t d = 0;
for (int _c = 0; _c < c; _c++){
scalar_t tmp = buf[k*c+_c]-xyz[(i*n+j)*c+_c];
d += (tmp*tmp);
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
// after processing all xyz2 and xyz1 for this batch
// go over result_i of xyz1, check if index < 0, change distance to 0
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
if (result_i[((i*n+j))] < 0)
result[(i*n+j)] = 0;
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor& xyz1, at::Tensor& xyz2, at::Tensor& dist1, at::Tensor& dist2, at::Tensor& idx1, at::Tensor& idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto c = xyz1.size(2); //point dimension
CHECK_EQ(xyz2.size(2), c);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
xyz1.scalar_type(), "NmDistanceKernel", ([&] {
NmDistanceKernel<scalar_t><<<dim3(batch_size,16,1),BATCH,BATCH*c*sizeof(scalar_t)>>>(batch_size, n, c, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>());
NmDistanceKernel<scalar_t><<<dim3(batch_size,16,1),BATCH,BATCH*c*sizeof(scalar_t)>>>(batch_size, m, c, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>());
})
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
int labeled_chamfer_cuda_forward(const at::Tensor& xyz1, const at::Tensor& xyz2, const at::Tensor& label1, const at::Tensor& label2,
at::Tensor& dist1, at::Tensor& dist2, at::Tensor& idx1, at::Tensor& idx2){
const int batch_size = xyz1.size(0);
const int n = xyz1.size(1); //num_points point cloud A
const int m = xyz2.size(1); //num_points point cloud B
const int c = xyz1.size(2); //point dimension
CHECK_EQ(xyz2.size(2), c);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
xyz1.scalar_type(), "NmDistanceKernel", ([&] {
LabeledNmDistanceKernel<scalar_t><<<dim3(batch_size,16,1),BATCH,BATCH*(c+1)*sizeof(scalar_t)>>>(batch_size, n, c, xyz1.data_ptr<scalar_t>(), label1.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), m,
xyz2.data_ptr<scalar_t>(), label2.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>());
LabeledNmDistanceKernel<scalar_t><<<dim3(batch_size,16,1),BATCH,BATCH*(c+1)*sizeof(scalar_t)>>>(batch_size, m, c, xyz2.data_ptr<scalar_t>(), label2.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), n,
xyz1.data_ptr<scalar_t>(), label1.toType(xyz1.scalar_type()).data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>());
})
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
template <typename scalar_t>
__global__ void NmDistanceGradKernel(int b,int n,int c,const scalar_t * xyz1,int m,const scalar_t * xyz2,
const scalar_t * grad_dist1, const int * idx1, scalar_t * grad_xyz1, scalar_t * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
int j2=idx1[i*n+j];
// ignore negative indices (for labeled_nmdistance points can have no closest neighbors!)
if (j2 < 0) continue;
scalar_t g=grad_dist1[i*n+j]*2;
for (int _c = 0; _c < c; _c++)
{
scalar_t xyz_g = g*(xyz1[(i*n+j)*c+_c]-xyz2[(i*m+j2)*c+_c]);
atomicAdd(&(grad_xyz1[(i*n+j)*c+_c]), xyz_g);
atomicAdd(&(grad_xyz2[(i*m+j2)*c+_c]),-xyz_g);
}
}
}
}
// xyz1 (b,n,c)
// xyz2 (b,m,c)
// gradxyz1 (b,n,3)
// gradxyz2 (b,m,3)
// graddist1 (b,n)
// graddist2 (b,m)
// idx1 (b,n) idx for the closest neighbor of each point in xyz1
// idx2 (b,m) ... xyz2
int chamfer_cuda_backward(at::Tensor& xyz1, at::Tensor& xyz2, at::Tensor& gradxyz1, at::Tensor& gradxyz2, at::Tensor& graddist1, at::Tensor& graddist2, at::Tensor& idx1, at::Tensor& idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto c = xyz1.size(2); //point dimension
// set to zero
gradxyz1.zero_();
gradxyz2.zero_();
CHECK_EQ(xyz2.size(2), c);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
xyz1.scalar_type(), "NmDistanceGradKernel", ([&] {
NmDistanceGradKernel<scalar_t><<<dim3(batch_size,16,1),256>>>(batch_size,n,c,xyz1.data_ptr<scalar_t>(),m,xyz2.data_ptr<scalar_t>(),graddist1.data_ptr<scalar_t>(),idx1.data_ptr<int>(),gradxyz1.data_ptr<scalar_t>(),gradxyz2.data_ptr<scalar_t>());
NmDistanceGradKernel<scalar_t><<<dim3(batch_size,16,1),256>>>(batch_size,m,c,xyz2.data_ptr<scalar_t>(),n,xyz1.data_ptr<scalar_t>(),graddist2.data_ptr<scalar_t>(),idx2.data_ptr<int>(),gradxyz2.data_ptr<scalar_t>(),gradxyz1.data_ptr<scalar_t>());
})
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
|
598d4490b9f6c44562d0363dacb16edc30ab2cf0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
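//For example, a pure green pixel (R=0, G=255, B=0) maps to about
//0.587 * 255 = 150 in the grey image, while a pure blue pixel maps to only about 29.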
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int i = blockIdx.x;
int j = blockIdx.y;
//optimized version with a higher number of threads per block
//int i = (blockIdx.x*blockDim.x) + threadIdx.x;
//int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int index = j*numCols + i;
greyImage[index] = .299f * rgbaImage[index].x + .587f * rgbaImage[index].y + .114f * rgbaImage[index].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//const dim3 blockSize(15, 2,1);
//const dim3 gridSize( numCols/blockSize.x, numRows/blockSize.y, 1);
const dim3 blockSize(1, 1,1);
const dim3 gridSize( numCols, numRows, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
|
598d4490b9f6c44562d0363dacb16edc30ab2cf0.cu
|
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
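//For example, a pure green pixel (R=0, G=255, B=0) maps to about
//0.587 * 255 = 150 in the grey image, while a pure blue pixel maps to only about 29.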
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int i = blockIdx.x;
int j = blockIdx.y;
//optimized version with a higher number of threads per block
//int i = (blockIdx.x*blockDim.x) + threadIdx.x;
//int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int index = j*numCols + i;
greyImage[index] = .299f * rgbaImage[index].x + .587f * rgbaImage[index].y + .114f * rgbaImage[index].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//const dim3 blockSize(15, 2,1);
//const dim3 gridSize( numCols/blockSize.x, numRows/blockSize.y, 1);
const dim3 blockSize(1, 1,1);
const dim3 gridSize( numCols, numRows, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
bfe8b363efe14f8874f8934c66bfb387b301d9b2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3) {
if (comp <= (-0.0f - var_1 - +1.9945E-44f / -1.8945E36f)) {
float tmp_1 = (+1.6933E34f - (-1.3140E20f / +1.2012E-26f));
float tmp_2 = -1.5160E-43f;
comp = tmp_2 / tmp_1 * var_2 * var_3;
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4);
hipDeviceSynchronize();
return 0;
}
|
bfe8b363efe14f8874f8934c66bfb387b301d9b2.cu
|
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3) {
if (comp <= (-0.0f - var_1 - +1.9945E-44f / -1.8945E36f)) {
float tmp_1 = (+1.6933E34f - (-1.3140E20f / +1.2012E-26f));
float tmp_2 = -1.5160E-43f;
comp = tmp_2 / tmp_1 * var_2 * var_3;
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4);
cudaDeviceSynchronize();
return 0;
}
|
93ac56281b6d63115833d079e202ad5209716ad3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
using namespace std;
#define N 200 // The xy Dimensions/Size of the Two Input Matrices
#define blockSize 32
// MatrixMultiply Kernel
// Assumes a 2D grid of blockSize x blockSize thread blocks covering the N x N output
__global__ void MatrixMultiply(int *X, int *Y, int *Z, int n){
	// Row and column of the output element handled by this thread
	int Xtid = blockIdx.x * blockDim.x + threadIdx.x; // row index
	int Ytid = blockIdx.y * blockDim.y + threadIdx.y; // column index
	if (Xtid >= n || Ytid >= n) return;
	// The full n x n int matrices (~156 KB each for n = 200) exceed the 48 KB of
	// static shared memory per block, so the operands are read from global memory.
	int sum = 0;
	// Do the Multiplication: dot product of row Xtid of X with column Ytid of Y
	for (int i = 0; i < n; i++) {
		sum += X[(Xtid * n) + i] * Y[(i * n) + Ytid];
	}
	// Put the Result in the Output Array
	Z[(Xtid * n) + Ytid] = sum;
}
int main(){
int X[N*N], Y[N*N], Z[N*N]; // Input Data: X, Y; Output Data: Z (row-major N x N matrices)
int *dev_X, *dev_Y, *dev_Z; // Device data pointers
// Allocate Memory on the Device/GPU
hipMalloc((void**)&dev_X, N*N*sizeof(int));
hipMalloc((void**)&dev_Y, N*N*sizeof(int));
hipMalloc((void**)&dev_Z, N*N*sizeof(int));
// Fill Input Arrays that are Size N x N
int arrayLength = N * N;
for(int i = 0; i < arrayLength; i++){
	X[i] = i % 100;        // deterministic fill so the CPU check below can verify the result
	Y[i] = (i * 7) % 100;
	Z[i] = 0;
}
// Copy data to the device
hipMemcpy(dev_X,X,N*N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_Y,Y,N*N*sizeof(int),hipMemcpyHostToDevice);
// Create Event
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Cuda Kernel Call
int gridSize = (N+(blockSize-1)) / blockSize;
// Call Event
hipEventRecord(start);
hipLaunchKernelGGL(( MatrixMultiply), dim3(gridSize,gridSize), dim3(blockSize,blockSize), 0, 0, dev_X, dev_Y, dev_Z, N);
hipEventRecord(stop);
// Copy memory off of the device
hipMemcpy(Z, dev_Z, N*N*sizeof(int), hipMemcpyDeviceToHost);
// Stop Event
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
// Check Contents of Working Arrays/Output Data
for (int row = 0; row < N; row++) {
	for (int col = 0; col < N; col++) {
		int checkValue = 0; // recompute this element on the CPU
		for (int k = 0; k < N; k++)
			checkValue += X[(row * N) + k] * Y[(k * N) + col];
		if (Z[(row * N) + col] != checkValue) {
			cout << "Mismatch " << (row * N) + col << endl;
		}
	}
}
// Free Memory
hipFree(dev_X);
hipFree(dev_Y);
hipFree(dev_Z);
}
|
93ac56281b6d63115833d079e202ad5209716ad3.cu
|
#include <iostream>
#include <math.h>
#include <curand_kernel.h>
#include <time.h>
using namespace std;
#define N 200 // The xy Dimensions/Size of the Two Input Matrices
#define blockSize 32
// MatrixMultiply Kernel
// Assumes a 2D grid of blockSize x blockSize thread blocks covering the N x N output
__global__ void MatrixMultiply(int *X, int *Y, int *Z, int n){
	// Row and column of the output element handled by this thread
	int Xtid = blockIdx.x * blockDim.x + threadIdx.x; // row index
	int Ytid = blockIdx.y * blockDim.y + threadIdx.y; // column index
	if (Xtid >= n || Ytid >= n) return;
	// The full n x n int matrices (~156 KB each for n = 200) exceed the 48 KB of
	// static shared memory per block, so the operands are read from global memory.
	int sum = 0;
	// Do the Multiplication: dot product of row Xtid of X with column Ytid of Y
	for (int i = 0; i < n; i++) {
		sum += X[(Xtid * n) + i] * Y[(i * n) + Ytid];
	}
	// Put the Result in the Output Array
	Z[(Xtid * n) + Ytid] = sum;
}
int main(){
int X[N*N], Y[N*N], Z[N*N]; // Input Data: X, Y; Output Data: Z (row-major N x N matrices)
int *dev_X, *dev_Y, *dev_Z; // Device data pointers
// Allocate Memory on the Device/GPU
cudaMalloc((void**)&dev_X, N*N*sizeof(int));
cudaMalloc((void**)&dev_Y, N*N*sizeof(int));
cudaMalloc((void**)&dev_Z, N*N*sizeof(int));
// Fill Input Arrays that are Size N x N
int arrayLength = N * N;
for(int i = 0; i < arrayLength; i++){
	X[i] = i % 100;        // deterministic fill so the CPU check below can verify the result
	Y[i] = (i * 7) % 100;
	Z[i] = 0;
}
// Copy data to the device
cudaMemcpy(dev_X,X,N*N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_Y,Y,N*N*sizeof(int),cudaMemcpyHostToDevice);
// Create Event
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Cuda Kernel Call
int gridSize = (N+(blockSize-1)) / blockSize;
// Call Event
cudaEventRecord(start);
MatrixMultiply<<<dim3(gridSize,gridSize), dim3(blockSize,blockSize)>>>(dev_X, dev_Y, dev_Z, N);
cudaEventRecord(stop);
// Copy memory off of the device
cudaMemcpy(Z, dev_Z, N*N*sizeof(int), cudaMemcpyDeviceToHost);
// Stop Event
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
// Check Contents of Working Arrays/Output Data
for (int row = 0; row < N; row++) {
	for (int col = 0; col < N; col++) {
		int checkValue = 0; // recompute this element on the CPU
		for (int k = 0; k < N; k++)
			checkValue += X[(row * N) + k] * Y[(k * N) + col];
		if (Z[(row * N) + col] != checkValue) {
			cout << "Mismatch " << (row * N) + col << endl;
		}
	}
}
// Free Memory
cudaFree(dev_X);
cudaFree(dev_Y);
cudaFree(dev_Z);
}
|
4dff6c3c9620dd553667ed705b083e5368f35c25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
constexpr int _MAX_CUDA_THREADS = 1024;
namespace TasGrid{
void TasCUDA::dtrans2can(bool use01, int dims, int num_x, int pad_size, const double *gpu_trans_a, const double *gpu_trans_b, const double *gpu_x_transformed, double *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tasgpu_transformed_to_canonical<double, double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), (2*pad_size) * sizeof(double), 0, dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01)hipLaunchKernelGGL(( tasgpu_m11_to_01<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims * num_x, gpu_x_canonical);
}
// local polynomial basis functions, DENSE algorithm
void TasCUDA::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x, const double *gpu_nodes, const double *gpu_support, double *gpu_y){
// each thread block runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 0, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_semilocalp){
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_semilocalp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}else{ // rule == wavelet
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_wavelet, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
// there is a switch statement that realizes templates for each combination of rule/order
// make one function that covers that switch, the rest is passed from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule,
int dims, int num_x, int num_points,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
void TasCUDA::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x,
const CudaVector<double> &gpu_nodes, const CudaVector<double> &gpu_support,
const CudaVector<int> &gpu_hpntr, const CudaVector<int> &gpu_hindx, const CudaVector<int> &gpu_hroots,
CudaVector<int> &gpu_spntr, CudaVector<int> &gpu_sindx, CudaVector<double> &gpu_svals){
gpu_spntr.resize(num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<double, 64, 46, false>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(cpu_spntr);
cpu_spntr[0] = 0;
int nz = 0;
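// in-place cumulative sum: the first (fill == false) pass left per-row non-zero counts in cpu_spntr,
// this loop turns them into CSR-style row offsets and leaves the total number of non-zeros in nz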
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(cpu_spntr);
gpu_sindx.resize(nz);
gpu_svals.resize(nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<double, 64, 46, true>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
// Sequence Grid basis evaluations
void TasCUDA::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes,
const CudaVector<int> &points, const CudaVector<double> &nodes, const CudaVector<double> &coeffs, double *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_build_cache<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_eval_sharedpoints<double, 32>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
// Fourier Grid basis evaluations
void TasCUDA::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes, const CudaVector<int> &points, double *gpu_wreal, double *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
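// Fourier grids triple the number of nodes with each level, hence max_nodes[j] = 3^max_levels[j];
// each dimension caches 2 * num_x * (max_nodes + 1) doubles, presumably the real and imaginary
// parts of the 1D basis values for every input point (the factor of 2 matches gpu_wreal/gpu_wimag)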
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dfor_build_cache<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<double, 32, true>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<double, 32, false>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
void TasCUDA::devalglo(bool is_nested, bool is_clenshawcurtis0, int dims, int num_x, int num_p, int num_basis,
double const *gpu_x, CudaVector<double> const &nodes, CudaVector<double> const &coeff, CudaVector<double> const &tensor_weights,
CudaVector<int> const &nodes_per_level, CudaVector<int> const &offset_per_level, CudaVector<int> const &map_dimension, CudaVector<int> const &map_level,
CudaVector<int> const &active_tensors, CudaVector<int> const &active_num_points, CudaVector<int> const &dim_offsets,
CudaVector<int> const &map_tensor, CudaVector<int> const &map_index, CudaVector<int> const &map_reference, double *gpu_result){
CudaVector<double> cache(num_x, num_basis);
int num_blocks = (int) map_dimension.size();
if (num_blocks >= 65536) num_blocks = 65536;
if (is_nested){
if (is_clenshawcurtis0){
hipLaunchKernelGGL(( tasgpu_dglo_build_cache<double, _MAX_CUDA_THREADS, true, true>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}else{
hipLaunchKernelGGL(( tasgpu_dglo_build_cache<double, _MAX_CUDA_THREADS, true, false>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
}else{
hipLaunchKernelGGL(( tasgpu_dglo_build_cache<double, _MAX_CUDA_THREADS, false, false>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
int mat_size = num_x * num_p;
num_blocks = num_x / _MAX_CUDA_THREADS + ((mat_size % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tasgpu_dglo_eval_zero<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, mat_size, gpu_result);
num_blocks = (int) map_tensor.size();
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tasgpu_dglo_eval_sharedpoints<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_tensor.size(), num_p, cache.data(),
tensor_weights.data(), offset_per_level.data(), dim_offsets.data(), active_tensors.data(), active_num_points.data(),
map_tensor.data(), map_index.data(), map_reference.data(), gpu_result);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > 65536) blocks = 65536;
hipLaunchKernelGGL(( tasgpu_cudaTgemm<double, 32, 96>), dim3(blocks), dim3(1024), 0, 0, M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_sparse_matmul<double, 64>), dim3(blocks), dim3(64), 0, 0, M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tascuda_fill<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tascuda_sparse_to_dense<double, 64>), dim3(num_blocks), dim3(64), 0, 0, num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
|
4dff6c3c9620dd553667ed705b083e5368f35c25.cu
|
/*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
constexpr int _MAX_CUDA_THREADS = 1024;
namespace TasGrid{
void TasCUDA::dtrans2can(bool use01, int dims, int num_x, int pad_size, const double *gpu_trans_a, const double *gpu_trans_b, const double *gpu_x_transformed, double *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tasgpu_transformed_to_canonical<double, double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS, (2*pad_size) * sizeof(double)>>>(dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01) tasgpu_m11_to_01<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(dims * num_x, gpu_x_canonical);
}
// local polynomial basis functions, DENSE algorithm
void TasCUDA::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x, const double *gpu_nodes, const double *gpu_support, double *gpu_y){
// each block thread runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly<double, 0, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2: tasgpu_devalpwpoly<double, 2, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2: tasgpu_devalpwpoly<double, 2, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2: tasgpu_devalpwpoly<double, 2, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_semilocalp){
tasgpu_devalpwpoly<double, 2, rule_semilocalp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}else{ // rule == wavelet
tasgpu_devalpwpoly<double, 1, rule_wavelet, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
// there is a switch statement that realizes templates for each combination of rule/order
// make one function that covers that switch, the rest is passed from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule,
int dims, int num_x, int num_points,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
void TasCUDA::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x,
const CudaVector<double> &gpu_nodes, const CudaVector<double> &gpu_support,
const CudaVector<int> &gpu_hpntr, const CudaVector<int> &gpu_hindx, const CudaVector<int> &gpu_hroots,
CudaVector<int> &gpu_spntr, CudaVector<int> &gpu_sindx, CudaVector<double> &gpu_svals){
gpu_spntr.resize(num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<double, 64, 46, false>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(cpu_spntr);
cpu_spntr[0] = 0;
int nz = 0;
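// in-place cumulative sum: the first (fill == false) pass left per-row non-zero counts in cpu_spntr,
// this loop turns them into CSR-style row offsets and leaves the total number of non-zeros in nz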
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(cpu_spntr);
gpu_sindx.resize(nz);
gpu_svals.resize(nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<double, 64, 46, true>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
// Sequence Grid basis evaluations
void TasCUDA::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes,
const CudaVector<int> &points, const CudaVector<double> &nodes, const CudaVector<double> &coeffs, double *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dseq_build_cache<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
tasgpu_dseq_eval_sharedpoints<double, 32><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
// Fourier Grid basis evaluations
void TasCUDA::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes, const CudaVector<int> &points, double *gpu_wreal, double *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
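// Fourier grids triple the number of nodes with each level, hence max_nodes[j] = 3^max_levels[j];
// each dimension caches 2 * num_x * (max_nodes + 1) doubles, presumably the real and imaginary
// parts of the 1D basis values for every input point (the factor of 2 matches gpu_wreal/gpu_wimag)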
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dfor_build_cache<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
tasgpu_dfor_eval_sharedpoints<double, 32, true><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
tasgpu_dfor_eval_sharedpoints<double, 32, false><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
void TasCUDA::devalglo(bool is_nested, bool is_clenshawcurtis0, int dims, int num_x, int num_p, int num_basis,
double const *gpu_x, CudaVector<double> const &nodes, CudaVector<double> const &coeff, CudaVector<double> const &tensor_weights,
CudaVector<int> const &nodes_per_level, CudaVector<int> const &offset_per_level, CudaVector<int> const &map_dimension, CudaVector<int> const &map_level,
CudaVector<int> const &active_tensors, CudaVector<int> const &active_num_points, CudaVector<int> const &dim_offsets,
CudaVector<int> const &map_tensor, CudaVector<int> const &map_index, CudaVector<int> const &map_reference, double *gpu_result){
CudaVector<double> cache(num_x, num_basis);
int num_blocks = (int) map_dimension.size();
if (num_blocks >= 65536) num_blocks = 65536;
if (is_nested){
if (is_clenshawcurtis0){
tasgpu_dglo_build_cache<double, _MAX_CUDA_THREADS, true, true><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}else{
tasgpu_dglo_build_cache<double, _MAX_CUDA_THREADS, true, false><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
}else{
tasgpu_dglo_build_cache<double, _MAX_CUDA_THREADS, false, false><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
int mat_size = num_x * num_p;
num_blocks = num_x / _MAX_CUDA_THREADS + ((mat_size % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tasgpu_dglo_eval_zero<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(mat_size, gpu_result);
num_blocks = (int) map_tensor.size();
if (num_blocks >= 65536) num_blocks = 65536;
tasgpu_dglo_eval_sharedpoints<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_tensor.size(), num_p, cache.data(),
tensor_weights.data(), offset_per_level.data(), dim_offsets.data(), active_tensors.data(), active_num_points.data(),
map_tensor.data(), map_index.data(), map_reference.data(), gpu_result);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > 65536) blocks = 65536;
tasgpu_cudaTgemm<double, 32, 96><<<blocks, 1024>>>(M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
tasgpu_sparse_matmul<double, 64><<<blocks, 64>>>(M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tascuda_fill<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= 65536) num_blocks = 65536;
tascuda_sparse_to_dense<double, 64><<<num_blocks, 64>>>(num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
|
57ca677c8013a2c433ba4563b54ca8a732e59c1c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
To Compile:
nvcc 2039281_Task3_B.cu lodepng.cpp -o task3_B
To Run:
./task3_B hck.png
To Store Output:
./task3_B hck.png > CUDA_Gaussian_output.txt
*/
/*****************************************************
BY Subin Shrestha
ID 2039281
--Image blur using gaussian blur
--Image is taken at runtime
--Make sure to include lodepng.cpp and lodepng.h file in the same folder as this file
******************************************************/
#include "lodepng.h"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
//To calculate time
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if (dn < 0)
{
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
//Global Device Variable
__device__ unsigned int width;
__device__ unsigned int height;
//Device Function to get required red color from the image
__device__ unsigned char getRed(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4);
return image[i];
}
//Device Function to get required green color from the image
__device__ unsigned char getGreen(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4) +1;
return image[i];
}
//Device Function to get required blue color from the image
__device__ unsigned char getBlue(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4) +2;
return image[i];
}
//Device Function to get required alpha channel from the image
__device__ unsigned char getAlpha(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4) +3;
return image[i];
}
//Device function to set red
__device__ void setRed(unsigned char *image, unsigned int row, unsigned int col, unsigned char red){
unsigned int i = (row * width * 4) + (col * 4);
image[i] = red;
}
//Device Function to set Green
__device__ void setGreen(unsigned char *image, unsigned int row, unsigned int col, unsigned char green){
unsigned int i = (row * width * 4) + (col * 4) +1;
image[i] = green;
}
//Device function to set Blue
__device__ void setBlue(unsigned char *image, unsigned int row, unsigned int col, unsigned char blue){
unsigned int i = (row * width * 4) + (col * 4) +2;
image[i] = blue;
}
//Device function to set alpha
__device__ void setAlpha(unsigned char *image, unsigned int row, unsigned int col, unsigned char alpha){
unsigned int i = (row * width * 4) + (col * 4) +3;
image[i] = alpha;
}
//Main kernel to blur the image on the device
//Pixels in row 0 or column 0 (threadIdx.x == 0 or blockIdx.x == 0) are skipped so the 3x3 stencil never reads before the start of the image
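//Data layout and launch geometry (derived from the helpers above and the launch in main):
//the image is RGBA with 4 bytes per pixel, indexed as image[(row * width + col) * 4 + channel];
//the kernel runs with w-1 blocks of h-1 threads, so blockIdx.x selects the column and threadIdx.x the row,
//which also means the last row and column, like row 0 and column 0, are never written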
__global__ void cudaBlur(unsigned char* img_in, unsigned char* newImage){
if(blockIdx.x == 0 || threadIdx.x == 0 ){
}
else{
//Weight matrix to apply blur.
float filter[3][3] = {
{ 1.0/16, 2.0/16, 1.0/16 },
{ 2.0/16, 4.0/16, 2.0/16 },
{ 1.0/16, 2.0/16, 1.0/16 }};
//variables
unsigned redTL,redTC, redTR;
unsigned redL, redC, redR;
unsigned redBL,redBC, redBR;
unsigned newRed;
unsigned greenTL,greenTC, greenTR;
unsigned greenL, greenC, greenR;
unsigned greenBL,greenBC, greenBR;
unsigned newGreen;
unsigned blueTL,blueTC, blueTR;
unsigned blueL, blueC, blueR;
unsigned blueBL,blueBC, blueBR;
unsigned newBlue;
int row = threadIdx.x;
int col = blockIdx.x;
setGreen(newImage, row, col, getGreen(img_in, row, col));
setBlue(newImage, row, col, getBlue(img_in, row, col));
setAlpha(newImage, row, col, 255);
redTL = getRed(img_in, row-1, col-1);
redTC = getRed(img_in, row-1, col);
redTR = getRed(img_in, row-1, col+1);
redL = getRed(img_in, row, col-1);
redC = getRed(img_in, row, col);
redR = getRed(img_in, row, col+1);
redBL = getRed(img_in, row+1, col-1);
redBC = getRed(img_in, row+1, col);
redBR = getRed(img_in, row+1, col+1);
//Calculating new values for Red
newRed = redTL*filter[0][0] + redTC*filter[0][1] + redTR*filter[0][2]
+ redL*filter[1][0] + redC*filter[1][1] + redR*filter[1][2]
+ redBL*filter[2][0] + redBC*filter[2][1] + redBR*filter[2][2];
//setting new values for red
setRed(newImage, row, col, newRed);
greenTL = getGreen(img_in, row-1, col-1);
greenTC = getGreen(img_in, row-1, col);
greenTR = getGreen(img_in, row-1, col+1);
greenL = getGreen(img_in, row, col-1);
greenC = getGreen(img_in, row, col);
greenR = getGreen(img_in, row, col+1);
greenBL = getGreen(img_in, row+1, col-1);
greenBC = getGreen(img_in, row+1, col);
greenBR = getGreen(img_in, row+1, col+1);
//Calculating new values for Green
newGreen = greenTL*filter[0][0] + greenTC*filter[0][1] + greenTR*filter[0][2]
+ greenL*filter[1][0] + greenC*filter[1][1] + greenR*filter[1][2]
+ greenBL*filter[2][0] + greenBC*filter[2][1] + greenBR*filter[2][2];
//Setting new values for green
setGreen(newImage, row, col, newGreen);
blueTL = getBlue(img_in, row-1, col-1);
blueTC = getBlue(img_in, row-1, col);
blueTR = getBlue(img_in, row-1, col+1);
blueL = getBlue(img_in, row, col-1);
blueC = getBlue(img_in, row, col);
blueR = getBlue(img_in, row, col+1);
blueBL = getBlue(img_in, row+1, col-1);
blueBC = getBlue(img_in, row+1, col);
blueBR = getBlue(img_in, row+1, col+1);
//calculating new values for Blue
newBlue = blueTL*filter[0][0] + blueTC*filter[0][1] + blueTR*filter[0][2]
+ blueL*filter[1][0] + blueC*filter[1][1] + blueR*filter[1][2]
+ blueBL*filter[2][0] + blueBC*filter[2][1] + blueBR*filter[2][2];
//setting new values for blue
setBlue(newImage, row, col, newBlue);
}
}
//function that sets width and height in global device variables
__global__ void setDimentions(unsigned int givenWidth, unsigned int givenHeight){
width = givenWidth;
height = givenHeight;
}
//Main function
int main(int argc, char **argv){
for (int i = 0; i < 10; i++)
{
//Starting Clock
struct timespec start, finish;
long long int difference;
clock_gettime(CLOCK_MONOTONIC, &start);
//image variables
unsigned char *image;
unsigned int w;
unsigned int h;
//getting the image at runtime
const char* filename = argv[1];
const char* img_output = "bluredImage.png";
unsigned char* output_raw;
hipError_t error;
//decoding image file
lodepng_decode32_file(&image, &w, &h, filename);
printf("width = %d height = %d\n", w, h);
//calling function to set global device variables
hipLaunchKernelGGL(( setDimentions), dim3(1),dim3(1), 0, 0, w, h);
unsigned char * d_image;
const int ARRAY_BYTES = h*w*4 * sizeof(unsigned char);
output_raw = (unsigned char *)malloc(ARRAY_BYTES);
//Memory allocation and copying the image into GPU memory
hipMalloc((void**) &d_image, ARRAY_BYTES);
hipMemcpy(d_image, image, ARRAY_BYTES, hipMemcpyHostToDevice);
//Define and allocate memory for the output image
unsigned char * d_out;
hipMalloc((void**) &d_out, ARRAY_BYTES);
//calling main device function to blur
hipLaunchKernelGGL(( cudaBlur), dim3(w-1), dim3(h-1), 0, 0, d_image, d_out);
//checking error
error = hipGetLastError();
if(error){
fprintf(stderr, "Kernel launch returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Wait for the kernel to finish before copying the result back
hipDeviceSynchronize();
//Copying output back to host memory
hipMemcpy(output_raw, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//Encoding image into output file
lodepng_encode32_file(img_output, output_raw, w, h);
//freeing variable
free(image);
//Stopping Clock
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &difference);
printf("run no %d lasted %lldns or %9.5lfs\n", i, difference, difference / 1000000000.0);
printf("##############################################\n");
}
return 0;
}
|
57ca677c8013a2c433ba4563b54ca8a732e59c1c.cu
|
/*
To Compile:
nvcc 2039281_Task3_B.cu lodepng.cpp -o task3_B
To Run:
./task3_B hck.png
To Store Output:
./task3_B hck.png > CUDA_Gaussian_output.txt
*/
/*****************************************************
BY Subin Shrestha
ID 2039281
--Image blur using gaussian blur
--Image is taken at runtime
--Make sure to include lodepng.cpp and lodepng.h file in the same folder as this file
******************************************************/
#include "lodepng.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
//To calculate time
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if (dn < 0)
{
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
//Global Device Variable
__device__ unsigned int width;
__device__ unsigned int height;
//Device Function to get required red color from the image
__device__ unsigned char getRed(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4);
return image[i];
}
//Device Function to get required green color from the image
__device__ unsigned char getGreen(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4) +1;
return image[i];
}
//Device Function to get required blue color from the image
__device__ unsigned char getBlue(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4) +2;
return image[i];
}
//Device Function to get required alpha channel from the image
__device__ unsigned char getAlpha(unsigned char *image, unsigned int row, unsigned int col){
unsigned int i = (row * width * 4) + (col * 4) +3;
return image[i];
}
//Device function to set red
__device__ void setRed(unsigned char *image, unsigned int row, unsigned int col, unsigned char red){
unsigned int i = (row * width * 4) + (col * 4);
image[i] = red;
}
//Device Function to set Green
__device__ void setGreen(unsigned char *image, unsigned int row, unsigned int col, unsigned char green){
unsigned int i = (row * width * 4) + (col * 4) +1;
image[i] = green;
}
//Device function to set Blue
__device__ void setBlue(unsigned char *image, unsigned int row, unsigned int col, unsigned char blue){
unsigned int i = (row * width * 4) + (col * 4) +2;
image[i] = blue;
}
//Device function to set alpha
__device__ void setAlpha(unsigned char *image, unsigned int row, unsigned int col, unsigned char alpha){
unsigned int i = (row * width * 4) + (col * 4) +3;
image[i] = alpha;
}
//Main kernel to blur the image on the device
//Pixels in row 0 or column 0 (threadIdx.x == 0 or blockIdx.x == 0) are skipped so the 3x3 stencil never reads before the start of the image
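//Data layout and launch geometry (derived from the helpers above and the launch in main):
//the image is RGBA with 4 bytes per pixel, indexed as image[(row * width + col) * 4 + channel];
//the kernel runs with w-1 blocks of h-1 threads, so blockIdx.x selects the column and threadIdx.x the row,
//which also means the last row and column, like row 0 and column 0, are never written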
__global__ void cudaBlur(unsigned char* img_in, unsigned char* newImage){
if(blockIdx.x == 0 || threadIdx.x == 0 ){
}
else{
//Weight matrix to apply blur.
float filter[3][3] = {
{ 1.0/16, 2.0/16, 1.0/16 },
{ 2.0/16, 4.0/16, 2.0/16 },
{ 1.0/16, 2.0/16, 1.0/16 }};
//variables
unsigned redTL,redTC, redTR;
unsigned redL, redC, redR;
unsigned redBL,redBC, redBR;
unsigned newRed;
unsigned greenTL,greenTC, greenTR;
unsigned greenL, greenC, greenR;
unsigned greenBL,greenBC, greenBR;
unsigned newGreen;
unsigned blueTL,blueTC, blueTR;
unsigned blueL, blueC, blueR;
unsigned blueBL,blueBC, blueBR;
unsigned newBlue;
int row = threadIdx.x;
int col = blockIdx.x;
setGreen(newImage, row, col, getGreen(img_in, row, col));
setBlue(newImage, row, col, getBlue(img_in, row, col));
setAlpha(newImage, row, col, 255);
redTL = getRed(img_in, row-1, col-1);
redTC = getRed(img_in, row-1, col);
redTR = getRed(img_in, row-1, col+1);
redL = getRed(img_in, row, col-1);
redC = getRed(img_in, row, col);
redR = getRed(img_in, row, col+1);
redBL = getRed(img_in, row+1, col-1);
redBC = getRed(img_in, row+1, col);
redBR = getRed(img_in, row+1, col+1);
//Calculating new values for Red
newRed = redTL*filter[0][0] + redTC*filter[0][1] + redTR*filter[0][2]
+ redL*filter[1][0] + redC*filter[1][1] + redR*filter[1][2]
+ redBL*filter[2][0] + redBC*filter[2][1] + redBR*filter[2][2];
//setting new values for red
setRed(newImage, row, col, newRed);
greenTL = getGreen(img_in, row-1, col-1);
greenTC = getGreen(img_in, row-1, col);
greenTR = getGreen(img_in, row-1, col+1);
greenL = getGreen(img_in, row, col-1);
greenC = getGreen(img_in, row, col);
greenR = getGreen(img_in, row, col+1);
greenBL = getGreen(img_in, row+1, col-1);
greenBC = getGreen(img_in, row+1, col);
greenBR = getGreen(img_in, row+1, col+1);
//Calculating new values for Green
newGreen = greenTL*filter[0][0] + greenTC*filter[0][1] + greenTR*filter[0][2]
+ greenL*filter[1][0] + greenC*filter[1][1] + greenR*filter[1][2]
+ greenBL*filter[2][0] + greenBC*filter[2][1] + greenBR*filter[2][2];
//Setting new values for green
setGreen(newImage, row, col, newGreen);
blueTL = getBlue(img_in, row-1, col-1);
blueTC = getBlue(img_in, row-1, col);
blueTR = getBlue(img_in, row-1, col+1);
blueL = getBlue(img_in, row, col-1);
blueC = getBlue(img_in, row, col);
blueR = getBlue(img_in, row, col+1);
blueBL = getBlue(img_in, row+1, col-1);
blueBC = getBlue(img_in, row+1, col);
blueBR = getBlue(img_in, row+1, col+1);
//calculating new values for Blue
newBlue = blueTL*filter[0][0] + blueTC*filter[0][1] + blueTR*filter[0][2]
+ blueL*filter[1][0] + blueC*filter[1][1] + blueR*filter[1][2]
+ blueBL*filter[2][0] + blueBC*filter[2][1] + blueBR*filter[2][2];
//setting new values for blue
setBlue(newImage, row, col, newBlue);
}
}
//function that sets width and height in global device variables
__global__ void setDimentions(unsigned int givenWidth, unsigned int givenHeight){
width = givenWidth;
height = givenHeight;
}
//Main function
int main(int argc, char **argv){
for (int i = 0; i < 10; i++)
{
//Starting Clock
struct timespec start, finish;
long long int difference;
clock_gettime(CLOCK_MONOTONIC, &start);
//image variables
unsigned char *image;
unsigned int w;
unsigned int h;
//getting the image at runtime
const char* filename = argv[1];
const char* img_output = "bluredImage.png";
unsigned char* output_raw;
cudaError_t error;
//decoding image file
lodepng_decode32_file(&image, &w, &h, filename);
printf("width = %d height = %d\n", w, h);
//calling function to set global device variables
setDimentions<<<1,1>>>(w, h);
unsigned char * d_image;
const int ARRAY_BYTES = h*w*4 * sizeof(unsigned char);
output_raw = (unsigned char *)malloc(ARRAY_BYTES);
//Memory allocation and copying the image into GPU memory
cudaMalloc((void**) &d_image, ARRAY_BYTES);
cudaMemcpy(d_image, image, ARRAY_BYTES, cudaMemcpyHostToDevice);
//Define and allocate memory for the output image
unsigned char * d_out;
cudaMalloc((void**) &d_out, ARRAY_BYTES);
//calling main device function to blur
cudaBlur<<<w-1, h-1>>>(d_image, d_out);
//checking error
error = cudaGetLastError();
if(error){
fprintf(stderr, "Kernel launch returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Wait for the kernel to finish before copying the result back
cudaDeviceSynchronize();
//Copying output back to host memory
cudaMemcpy(output_raw, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//Encoding image into output file
lodepng_encode32_file(img_output, output_raw, w, h);
//freeing variable
free(image);
//Stopping Clock
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &difference);
printf("run no %d lasted %lldns or %9.5lfs\n", i, difference, difference / 1000000000.0);
printf("##############################################\n");
}
return 0;
}
|
1cbf9cd158ca75d25c86edb9e000454b88b4b6e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <climits>
#include <algorithm>
void readField(std::vector<uint8_t> &field, int height) {
std::string inputLine;
for (int i = 0; i <= height; i++)
{
std::getline(std::cin, inputLine);
std::vector<uint8_t> tempvec;
std::istringstream input(inputLine);
std::string temp;
while (std::getline(input, temp, ' '))
{
if (temp == "X")
tempvec.emplace_back(1);
else if (temp == "O")
tempvec.emplace_back(0);
else
std::cerr << "symbol mismatch\n";
}
for (uint8_t i : tempvec) {
field.emplace_back(i);
}
}
}
void writeField(std::vector<uint8_t> &v, int width) {
int count = 0;
for (uint8_t i : v)
{
count++;
// std::cout << static_cast<int>(i) << " ";
std::cout << ((i == 0) ? 'O' : 'X') << " ";
if (count % width == 0)
std::cout << std::endl;
}
}
hipError_t lifeWithCuda(uint8_t *field, unsigned int width, unsigned int height, unsigned int generation);
__global__ void lifeKernelaggregator(uint8_t *field, uint8_t *tempfield, unsigned int width, unsigned int height)
{
for (int b = blockIdx.x; b < height; b += gridDim.x) { // rows are always exclusive to blocks, they can only grow to height, thus never out of the size of the memory segment
for (int t = threadIdx.x; t < width; t += blockDim.x) // threads not growing past the size of width ensures threads not accessing memory where they shouldn't
{
// establish identity of cell
int cellnr = b * width + t;
// calculate the 1d aggregated neighbourhood (top + mid + bot) and store it in tempfield
if (b == 0)
tempfield[cellnr] = field[(width * (height - 1)) + t];
else
tempfield[cellnr] = field[cellnr - width]; // row on top
tempfield[cellnr] += field[cellnr];
if (b == (height - 1))
tempfield[cellnr] += field[(0 * width) + t];
else
tempfield[cellnr] += field[cellnr + width]; //row below
__syncthreads(); // obsolete?
}
}
}
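// How the two kernels fit together: the aggregator above stores, for every cell, the vertical sum of its
// column neighbourhood (cell above + cell itself + cell below, with toroidal wrap-around); the kernel below
// adds the left/centre/right vertical sums, so left + mid + right counts the full 3x3 block including the
// cell itself, which is why a live cell survives when the total is 3 or 4 (2 or 3 live neighbours) and a
// dead cell becomes alive when the total is exactly 3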
__global__ void lifeKernel(uint8_t *field, uint8_t *tempfield, unsigned int width, unsigned int height)
{
for (int b = blockIdx.x; b < height; b += gridDim.x) { // rows are always exclusive to blocks, they can only grow to height, thus never out of the size of the memory segment
for (int t = threadIdx.x; t < width; t += blockDim.x) // threads not growing past the size of width ensures threads not accessing memory where they shouldn't
{
int cellnr = b * width + t;
// calculate cell value
uint8_t left;
if (t == 0)
left = tempfield[cellnr + width - 1];
else
left = tempfield[cellnr - 1];
uint8_t mid = tempfield[cellnr];
uint8_t right;
if (t == width - 1)
right = tempfield[cellnr + 1 - width];
else
right = tempfield[cellnr + 1];
if (field[cellnr] == 1) {
if (!(3 <= left + mid + right && left + mid + right <= 4))
field[cellnr] = 0;
}
else
{
if (left + mid + right == 3)
field[cellnr] = 1;
}
__syncthreads();
}
}
}
int main()
{
int generations;
std::cin >> generations;
int width;
std::cin >> width;
int height;
std::cin >> height;
std::vector<uint8_t> field; //( width, std::vector<float> ( height, 0 ) ) //could initialize
readField(field, height);
// generate generation X of game of life on field
hipError_t cudaStatus = lifeWithCuda(field.data(), width, height, generations);
if (cudaStatus != hipSuccess) {
std::cerr << "lifeWithCuda failed!\n";
return 1;
}
writeField(field, width);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
std::cerr << "hipDeviceReset failed!\n";
return 1;
}
return 0;
}
// Helper function that uses CUDA to advance the Game of Life field by the requested number of generations.
hipError_t lifeWithCuda(uint8_t *field, unsigned int width, unsigned int height, unsigned int generation)
{
uint8_t *dev_field = 0;
uint8_t *dev_field_temp = 0;
hipError_t cudaStatus;
// for robustness against enormous entries
unsigned int maxThreadsperBlock = 1024; // for my device maxThreadsperBlock is 1024
unsigned int maxBlocksperGrid = 12288; // max gridx is 2147483647, but shared memory per block is just 49152 Bytes, i assume 3 times my entry as memory usage, so i pick ~1/4th to be safe
int threadnum = ::min(maxThreadsperBlock, width);
int blocknum = ::min(maxBlocksperGrid, height);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
std::cerr << "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n";
goto Error;
}
// Allocate GPU buffers for two fields (one input, one output) .
cudaStatus = hipMalloc((void**)&dev_field, height * (width * sizeof(uint8_t)));
if (cudaStatus != hipSuccess) {
std::cerr << "hipMalloc failed!\n";
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_field_temp, height * (width * sizeof(uint8_t)));
if (cudaStatus != hipSuccess) {
std::cerr << "hipMalloc failed!\n";
goto Error;
}
// Copy input field from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_field, field, height * (width * sizeof(uint8_t)), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::cerr << "hipMemcpy failed!\n";
goto Error;
}
// TODO: figure out what kind of thread distribution works best
for (unsigned int g = 0; g < generation; g++)
{
hipLaunchKernelGGL(( lifeKernelaggregator) , dim3(blocknum), dim3(threadnum) , 0, 0, dev_field, dev_field_temp, width, height);
hipLaunchKernelGGL(( lifeKernel) , dim3(blocknum), dim3(threadnum) , 0, 0, dev_field, dev_field_temp, width, height);
std::cout << ".";
}
std::cout << std::endl;
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
std::cerr << "addKernel launch failed: %s\n" << hipGetErrorString(cudaStatus);
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
std::cerr << "hipDeviceSynchronize returned error code %d after launching addKernel!\n" << cudaStatus;
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(field, dev_field, height * (width * sizeof(uint8_t)), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
std::cerr << "hipMemcpy failed!\n";
goto Error;
}
Error:
hipFree(dev_field);
hipFree(dev_field_temp);
return cudaStatus;
}
|
1cbf9cd158ca75d25c86edb9e000454b88b4b6e8.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <climits>
#include <algorithm>
void readField(std::vector<uint8_t> &field, int height) {
std::string inputLine;
for (int i = 0; i <= height; i++)
{
std::getline(std::cin, inputLine);
std::vector<uint8_t> tempvec;
std::istringstream input(inputLine);
std::string temp;
while (std::getline(input, temp, ' '))
{
if (temp == "X")
tempvec.emplace_back(1);
else if (temp == "O")
tempvec.emplace_back(0);
else
std::cerr << "symbol mismatch\n";
}
for (uint8_t i : tempvec) {
field.emplace_back(i);
}
}
}
void writeField(std::vector<uint8_t> &v, int width) {
int count = 0;
for (uint8_t i : v)
{
count++;
// std::cout << static_cast<int>(i) << " ";
std::cout << ((i == 0) ? 'O' : 'X') << " ";
if (count % width == 0)
std::cout << std::endl;
}
}
cudaError_t lifeWithCuda(uint8_t *field, unsigned int width, unsigned int height, unsigned int generation);
__global__ void lifeKernelaggregator(uint8_t *field, uint8_t *tempfield, unsigned int width, unsigned int height)
{
for (int b = blockIdx.x; b < height; b += gridDim.x) { // rows are always exclusive to blocks, they can only grow to height, thus never out of the size of the memory segment
for (int t = threadIdx.x; t < width; t += blockDim.x) // threads not growing past the size of width ensures threads not accessing memory where they shouldn't
{
// establish identity of cell
int cellnr = b * width + t;
// calculate the 1d aggregated neighbourhood (top + mid + bot) and store it in tempfield
if (b == 0)
tempfield[cellnr] = field[(width * (height - 1)) + t];
else
tempfield[cellnr] = field[cellnr - width]; // row on top
tempfield[cellnr] += field[cellnr];
if (b == (height - 1))
tempfield[cellnr] += field[(0 * width) + t];
else
tempfield[cellnr] += field[cellnr + width]; //row below
__syncthreads(); // obsolete?
}
}
}
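// How the two kernels fit together: the aggregator above stores, for every cell, the vertical sum of its
// column neighbourhood (cell above + cell itself + cell below, with toroidal wrap-around); the kernel below
// adds the left/centre/right vertical sums, so left + mid + right counts the full 3x3 block including the
// cell itself, which is why a live cell survives when the total is 3 or 4 (2 or 3 live neighbours) and a
// dead cell becomes alive when the total is exactly 3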
__global__ void lifeKernel(uint8_t *field, uint8_t *tempfield, unsigned int width, unsigned int height)
{
for (int b = blockIdx.x; b < height; b += gridDim.x) { // rows are always exclusive to blocks, they can only grow to height, thus never out of the size of the memory segment
for (int t = threadIdx.x; t < width; t += blockDim.x) // threads not growing past the size of width ensures threads not accessing memory where they shouldn't
{
int cellnr = b * width + t;
// calculate cell value
uint8_t left;
if (t == 0)
left = tempfield[cellnr + width - 1];
else
left = tempfield[cellnr - 1];
uint8_t mid = tempfield[cellnr];
uint8_t right;
if (t == width - 1)
right = tempfield[cellnr + 1 - width];
else
right = tempfield[cellnr + 1];
if (field[cellnr] == 1) {
if (!(3 <= left + mid + right && left + mid + right <= 4))
field[cellnr] = 0;
}
else
{
if (left + mid + right == 3)
field[cellnr] = 1;
}
__syncthreads();
}
}
}
int main()
{
int generations;
std::cin >> generations;
int width;
std::cin >> width;
int height;
std::cin >> height;
std::vector<uint8_t> field; //( width, std::vector<float> ( height, 0 ) ) //could initialize
readField(field, height);
// generate generation X of game of life on field
cudaError_t cudaStatus = lifeWithCuda(field.data(), width, height, generations);
if (cudaStatus != cudaSuccess) {
std::cerr << "lifeWithCuda failed!\n";
return 1;
}
writeField(field, width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaDeviceReset failed!\n";
return 1;
}
return 0;
}
// Helper function that uses CUDA to advance the Game of Life field by the requested number of generations.
cudaError_t lifeWithCuda(uint8_t *field, unsigned int width, unsigned int height, unsigned int generation)
{
uint8_t *dev_field = 0;
uint8_t *dev_field_temp = 0;
cudaError_t cudaStatus;
// for robustness against enormous entries
unsigned int maxThreadsperBlock = 1024; // for my device maxThreadsperBlock is 1024
unsigned int maxBlocksperGrid = 12288; // max gridx is 2147483647, but shared memory per block is just 49152 Bytes, i assume 3 times my entry as memory usage, so i pick ~1/4th to be safe
int threadnum = std::min(maxThreadsperBlock, width);
int blocknum = std::min(maxBlocksperGrid, height);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n";
goto Error;
}
// Allocate GPU buffers for two fields (one input, one output) .
cudaStatus = cudaMalloc((void**)&dev_field, height * (width * sizeof(uint8_t)));
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaMalloc failed!\n";
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_field_temp, height * (width * sizeof(uint8_t)));
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaMalloc failed!\n";
goto Error;
}
// Copy input field from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_field, field, height * (width * sizeof(uint8_t)), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaMemcpy failed!\n";
goto Error;
}
// TODO: figure out what kind of thread distribution works best
for (unsigned int g = 0; g < generation; g++)
{
lifeKernelaggregator <<<blocknum, threadnum >>> (dev_field, dev_field_temp, width, height);
lifeKernel <<<blocknum, threadnum >>> (dev_field, dev_field_temp, width, height);
std::cout << ".";
}
std::cout << std::endl;
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
std::cerr << "addKernel launch failed: %s\n" << cudaGetErrorString(cudaStatus);
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaDeviceSynchronize returned error code %d after launching addKernel!\n" << cudaStatus;
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(field, dev_field, height * (width * sizeof(uint8_t)), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaMemcpy failed!\n";
goto Error;
}
Error:
cudaFree(dev_field);
cudaFree(dev_field_temp);
return cudaStatus;
}
|
1463f4e7048693ff0d5c55bbc4195f2a251fc522.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <set>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "utils.hpp"
#define RAND_FACTOR 1e9+7
// #define THREAD (9 * 256)
#define THREAD 25
#define DEBUG
using namespace std;
map<LL,LL> mp, rmp;
// Indices start from 1 in the mapped nodes
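// mpu repeatedly calls DSH::miniCut on the hyperedges still in E and moves the returned cut set into E_dash
// until roughly p - sqrt(n_hedges) edges are collected (drawing randomly from the last cut if taking all of
// it would overshoot p), then pads the selection up to p edges with the smallest remaining hyperedges
// according to _HyperEdge's ordering (defined elsewhere, presumably in utils.hpp)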
vector<_HyperEdge> mpu(LL n_nodes, LL n_hedges, LL p, LL q, vector<_HyperEdge> hyperEdge) {
DSH dsh = DSH();
LL threshold = (LL)(p - sqrt((double)n_hedges)), E_dsize = 0, E_ddsize, rnd = 0;
LL sizeRecord = -1;
vector<LL> E_ddash;
set<LL> E, E_dash;
set<LL>::iterator iter;
E.clear();
E_dash.clear();
E_ddash.clear();
for (int i = 0; i < n_hedges; i++)
E.insert((LL)i + 1);
while (E_dsize < threshold) {
dsh.buildFlowGraph(n_nodes, E, hyperEdge, q);
E_ddash = dsh.miniCut();
E_ddsize = E_ddash.size();
if (E_dsize + E_ddsize <= p) {
E_dash.insert(E_ddash.begin(), E_ddash.end());
for (LL i = 0; i < E_ddsize; i++)
E.erase(E_ddash[i]);
}
else {
for (int i = 0; i < p - E_dsize; i++) {
rnd = (LL)rand() % E_ddsize;
E_dash.insert(E_ddash[rnd]);
E.erase(E_ddash[rnd]);
E_ddash.erase(E_ddash.begin() + rnd);
E_ddsize--;
}
}
E_dsize = E_dash.size();
if(E_dsize == sizeRecord || E_dsize >= n_hedges){
printf("ERROR (DEAD) - ");
break;
}
sizeRecord = E_dsize;
}
vector<_HyperEdge> cardinality, result;
cardinality.clear();
result.clear();
for (iter = E.begin(); iter != E.end(); iter++)
cardinality.push_back(hyperEdge[*iter - 1]);
sort(cardinality.begin(), cardinality.end());
for (LL i = 0; cardinality.size() && i < cardinality.size() && i < p - E_dsize; i++)
result.push_back(cardinality[i]);
for (iter = E_dash.begin(); iter != E_dash.end(); iter++)
result.push_back(hyperEdge[*iter - 1]);
return result;
}
void newOutput(LL counter, char* outputFile) {
char str[20] = "output/";
sprintf(str + 7, "%lld_%lld.txt", counter / 5, counter % 5);
strcpy(outputFile, str);
}
__device__ int getIndex(){
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int line = blockDim.x * gridDim.x;
return row * line + col;
}
// __device__ void nd_clearVisit(bool *vis, LL tot, int index){
// memset(vis + index * tot, false, sizeof(bool) * tot);
// }
//
// __device__ bool nd_isVisited(bool* vis, LL nd, LL tot, int index){
// return vis[index * tot + nd - 1];
// }
//
// __device__ bool nd_setVisited(bool* vis, LL nd, LL tot, int index){
// bool oldState = vis[index * tot + nd - 1];
// vis[index * tot + nd - 1] = true;
// return oldState;
// }
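// Per-thread node-set layout: each thread owns a NODESETSIZE-long slice of d_nodeSet (NODESETSIZE is assumed
// to be defined in utils.hpp); slot 0 of the slice holds the number of entries written so far, while
// nodeCount[index] remembers the length committed by the last finished walk, so ndset_clear can roll back an
// abandoned walk and ndset_new appends a 0 separator and commits the walk that was just completed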
__device__ void ndset_clear_global(LL *nodeSet, LL *nodeCount, int index){
nodeSet[index * NODESETSIZE] = 0;
nodeCount[index] = 0;
}
__device__ void ndset_clear(LL* nodeSet, LL *nodeCount, int index){
nodeSet[index * NODESETSIZE] = nodeCount[index];
}
__device__ bool ndset_insert(LL* nodeSet, LL value, int index){
LL cnt = ++nodeSet[index * NODESETSIZE];
if(cnt >= NODESETSIZE)
return false;
nodeSet[index * NODESETSIZE + cnt] = value;
return true;
}
__device__ int ndset_size_global(LL *nodeSet, int index){
return nodeSet[index * NODESETSIZE];
}
__device__ int ndset_size(LL* nodeSet, LL *nodeCount, int index){
return nodeSet[index * NODESETSIZE] - nodeCount[index];
}
__device__ void ndset_erase(LL* nodeSet, LL *nodeCount, int index){
if(ndset_size(nodeSet, nodeCount, index) > 0)
nodeSet[index * NODESETSIZE]--;
}
__device__ bool ndset_new(LL *nodeSet, LL *nodeCount, int index){
if(!ndset_insert(nodeSet, 0, index))
return false;
nodeCount[index] = ndset_size_global(nodeSet, index);
return true;
}
__global__ void setupRandGenerator(float* randSeed, hiprandState_t* state){
int index = getIndex();
unsigned long seed = (unsigned long)(randSeed[index] * RAND_FACTOR);
hiprand_init(seed, index, 0, &state[index]);
}
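// reverseInfluence (as implemented below): each thread repeatedly claims one of the remaining
// iterations by atomically incrementing d_k, then performs a random walk starting from source,
// recording the visited nodes in its nodeSet segment. A walk is committed when it reaches a
// neighbor of sink, and discarded when it revisits a node or draws an out-of-range edge index;
// if the segment would overflow, the thread stops early and the host loop relaunches the kernel.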
__global__ void reverseInfluence(LL totalNodes,
LL totalEdges,
LL iter_left,
LL* d_k,
LL* d_adjCount,
LL* d_adjList,
LL* d_nodeSet,
LL* d_nodeCount,
LL source, LL sink,
hiprandState_t *state){
int index = getIndex();
LL startNode, outdegree, nextNode;
LL cur_k;
float probability;
bool neighbor = false, overflow = false;
hiprandState_t localState = state[index];
ndset_clear_global(d_nodeSet, d_nodeCount, index);
bool vis[10000];
while(!overflow){
cur_k = atomicAdd((unsigned long long*)d_k, 1ULL);
if(iter_left <= cur_k){
atomicAdd((unsigned long long*)d_k, (unsigned long long)-1); // 64-bit decrement (there is no 64-bit atomicSub)
break;
}
neighbor = false;
startNode = source;
memset(vis, false, sizeof(vis));
ndset_insert(d_nodeSet, startNode, index);
vis[startNode] = true;
while (true) {
outdegree = d_adjCount[startNode] - d_adjCount[startNode - 1];
probability = hiprand_uniform(&localState) * outdegree;
nextNode = floor(probability);
nextNode += d_adjCount[startNode - 1];
if (nextNode >= d_adjCount[startNode]) {
// printf("%lld >= %lld - choose no point - terminate\n", nextNode, d_adjCount[startNode]);
ndset_clear(d_nodeSet, d_nodeCount, index);
break;
}
// printf("%lld < %lld - choose point\n", nextNode, d_adjCount[startNode]);
for (LL j = d_adjCount[startNode - 1]; j < d_adjCount[startNode]; j++) {
if (d_adjList[j] == sink) {
// printf("reach sink neighbor - terminate\n");
ndset_erase(d_nodeSet, d_nodeCount, index);
neighbor = true;
break;
}
}
if (neighbor)
break;
startNode = d_adjList[nextNode];
if(vis[startNode]){
// printf("visited - terminate\n");
ndset_clear(d_nodeSet, d_nodeCount, index);
break;
}
vis[startNode] = true;
if(!ndset_insert(d_nodeSet, startNode, index)){
// printf("overflow - terminate\n");
overflow = true;
break;
}
}
if(overflow){
ndset_clear(d_nodeSet, d_nodeCount, index);
break;
}
if(ndset_size(d_nodeSet, d_nodeCount, index) == 0)
continue;
if(!ndset_new(d_nodeSet, d_nodeCount, index))
break;
}
state[index] = localState;
}
int main(int argc, char** argv) {
LL source = 0, sink = 0, lines;
string filePath;
LL p, q, k = 1000;
// char outputFile[20];
long startTime;
LL totalNodes = 0, totalEdges = 0;
LL* h_adjCount = NULL, *h_adjList = NULL, *h_nodeSet = NULL;
cout << "Choose dataset: ";
cout.flush();
// cin >> filePath;
filePath = "../../data/wiki/wiki.txt";
rmp = readGraph(filePath.c_str(), h_adjList, h_adjCount, totalNodes, totalEdges, mp, true);
cout << "Choose input file: ";
cout.flush();
// cin >> filePath;
filePath = "../../data/wiki/input.txt";
FILE* fd = fopen(filePath.c_str(), "r");
cout << "How many lines: ";
cout.flush();
// cin >> lines;
lines = 1;
printf("========= NEW RUN\n");
printf("This graph contains %lld nodes connected by %lld edges\n", totalNodes, totalEdges);
printf("Running on %d threads\n\n", THREAD);
float alpha, beta, pmax;
float kmax, dif;
LL counter = 0, loop, iters, iter_left;
srand(time(NULL));
vector<_HyperEdge> hyperEdge;
set<LL> nodeSet;
vector<_HyperEdge> E;
dim3 gridSize(3,3), blockSize(16,16);
dim3 testBlock(5,5);
float *d_randSeed;
hiprandState_t *d_randState;
hiprandGenerator_t curandGenerator;
hipMalloc((void**)&d_randSeed, sizeof(float) * THREAD);
hipMalloc((void**)&d_randState, sizeof(hiprandState_t) * THREAD);
hiprandCreateGenerator(&curandGenerator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curandGenerator, time(NULL));
hiprandGenerateUniform(curandGenerator, d_randSeed, THREAD);
// setupRandGenerator<<<gridSize,blockSize>>>(d_randSeed, d_randState);
hipLaunchKernelGGL(( setupRandGenerator), dim3(1),dim3(testBlock), 0, 0, d_randSeed, d_randState);
LL* d_adjCount = NULL, *d_adjList = NULL, *d_nodeSet = NULL, *d_k = NULL;
LL *d_nodeCount = NULL;
hipMalloc((void**)&d_nodeSet, sizeof(LL) * THREAD * NODESETSIZE);
hipMalloc((void**)&d_adjCount, sizeof(LL) * (totalNodes + 1));
hipMalloc((void**)&d_adjList, sizeof(LL) * totalEdges);
hipMalloc((void**)&d_nodeCount, sizeof(LL) * THREAD);
hipMalloc((void**)&d_k, sizeof(LL));
hipMemcpy(d_adjList, h_adjList, sizeof(LL)*totalEdges, hipMemcpyHostToDevice);
hipMemcpy(d_adjCount, h_adjCount, sizeof(LL)*(totalNodes+1),hipMemcpyHostToDevice);
FILE* wfd = fopen("output.txt", "w");
// printf(" ln %%diff time per loop\n");
while (~fscanf(fd, "s %lld t %lld alpha %f L %lld pmax %f beta %f\n", &sink, &source, &alpha, &k, &pmax, &beta)) {
counter++;
// printf("#%4lld - ", counter);
// fflush(stdout);
kmax = k * pmax;
hyperEdge.clear();
sink = mp[sink];
source = mp[source];
startTime = clock();
iter_left = k;
hipMemset(d_k, 0LL, sizeof(LL));
h_nodeSet = new LL[THREAD * NODESETSIZE];
loop = 0;
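// Relaunch the kernel until all k iterations have been claimed (d_k reaches k), copying the
// packed per-thread node sets back after every launch and appending the decoded walks to hyperEdge.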
while(iter_left > 0){
// reverseInfluence<<<gridSize,blockSize>>>( totalNodes, totalEdges, k,
hipLaunchKernelGGL(( reverseInfluence), dim3(1),dim3(testBlock), 0, 0, totalNodes, totalEdges, k,
d_k,
d_adjCount,
d_adjList,
d_nodeSet,
d_nodeCount,
source, sink,
d_randState );
hipMemcpy(h_nodeSet, d_nodeSet, sizeof(LL)*THREAD*NODESETSIZE, hipMemcpyDeviceToHost);
hipMemcpy(&iters, d_k, sizeof(LL), hipMemcpyDeviceToHost);
loop++;
iter_left = k - iters;
_HyperEdge item;
for(int i = 0;i < THREAD;i++){
item.v_size = 0;
LL offset = i * NODESETSIZE;
for(LL j = 1;j <= h_nodeSet[offset];j++){
if(h_nodeSet[offset + j] == 0){
hyperEdge.push_back(item);
item.v_size = 0;
continue;
}
item.vertex[item.v_size++] = h_nodeSet[offset + j];
if(j == h_nodeSet[offset] && h_nodeSet[offset + j] != 0){
hyperEdge.push_back(item);
item.v_size = 0;
}
// printf("%lld / %lld, hyperedge size: %ld\n", iter_left, k, hyperEdge.size());
// printf("%lld ", h_nodeSet[offset + j]);
}
// printf("\n");
}
printf("%lld round(%lld left), %ld hyperedges\n", iters, iter_left, hyperEdge.size());
// iter_left = 0;
}
delete[] h_nodeSet;
q = ((hyperEdge.size() / totalNodes) + hyperEdge.size()) / 2;
p = (LL)(beta * hyperEdge.size());
dif = kmax - hyperEdge.size();
if(dif < 0) dif = -dif;
printf("%.4f%% - \n", dif / kmax * 100);
fflush(stdout);
break;
nodeSet.clear();
E.clear();
if (hyperEdge.size() > 0)
E = mpu(totalNodes, (LL)hyperEdge.size(), p, q, hyperEdge);
for (LL i = 0; i < E.size(); i++)
nodeSet.insert(E[i].vertex, E[i].vertex + E[i].v_size);
startTime = clock() - startTime;
printf("%ld s %3ld ms - %lld s %3lld ms\n",
startTime / CLOCKS_PER_SEC,
startTime % CLOCKS_PER_SEC / 1000,
startTime / loop / CLOCKS_PER_SEC,
startTime / loop % CLOCKS_PER_SEC / 1000);
for (auto i = nodeSet.begin(); i != nodeSet.end(); i++)
fprintf(wfd, "%lld ", rmp[*i]);
fprintf(wfd, "\n");
if (lines > 0 && counter >= lines)
break;
}
fflush(wfd);
fclose(wfd);
fclose(fd);
hipFree(d_randSeed);
hipFree(d_randState);
hipFree(d_nodeSet);
hipFree(d_adjCount);
hipFree(d_adjList);
hipFree(d_nodeCount);
hipFree(d_k);
delete[] h_adjCount;
delete[] h_adjList;
printf("\n========= FINISH\n");
return 0;
}
|
1463f4e7048693ff0d5c55bbc4195f2a251fc522.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <set>
#include <map>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "utils.hpp"
#define RAND_FACTOR (1e9+7)
// #define THREAD (9 * 256)
#define THREAD 25
#define DEBUG
using namespace std;
map<LL,LL> mp, rmp;
// Index start from 1 in mapped nodes
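// mpu (as written below): repeatedly builds a flow graph over the remaining hyperedges and takes
// the min-cut set E'' returned by DSH, moving it into E' until |E'| reaches the threshold
// p - sqrt(#hyperedges), picking random members of E'' when taking the whole set would exceed p.
// The remaining slots up to p are then filled with the smallest leftover hyperedges by cardinality.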
vector<_HyperEdge> mpu(LL n_nodes, LL n_hedges, LL p, LL q, vector<_HyperEdge> hyperEdge) {
DSH dsh = DSH();
LL threshold = (LL)(p - sqrt((double)n_hedges)), E_dsize = 0, E_ddsize, rnd = 0;
LL sizeRecord = -1;
vector<LL> E_ddash;
set<LL> E, E_dash;
set<LL>::iterator iter;
E.clear();
E_dash.clear();
E_ddash.clear();
for (int i = 0; i < n_hedges; i++)
E.insert((LL)i + 1);
while (E_dsize < threshold) {
dsh.buildFlowGraph(n_nodes, E, hyperEdge, q);
E_ddash = dsh.miniCut();
E_ddsize = E_ddash.size();
if (E_dsize + E_ddsize <= p) {
E_dash.insert(E_ddash.begin(), E_ddash.end());
for (LL i = 0; i < E_ddsize; i++)
E.erase(E_ddash[i]);
}
else {
for (int i = 0; i < p - E_dsize; i++) {
rnd = (LL)rand() % E_ddsize;
E_dash.insert(E_ddash[rnd]);
E.erase(E_ddash[rnd]);
E_ddash.erase(E_ddash.begin() + rnd);
E_ddsize--;
}
}
E_dsize = E_dash.size();
if(E_dsize == sizeRecord || E_dsize >= n_hedges){
printf("ERROR (DEAD) - ");
break;
}
sizeRecord = E_dsize;
}
vector<_HyperEdge> cardinality, result;
cardinality.clear();
result.clear();
for (iter = E.begin(); iter != E.end(); iter++)
cardinality.push_back(hyperEdge[*iter - 1]);
sort(cardinality.begin(), cardinality.end());
for (LL i = 0; cardinality.size() && i < cardinality.size() && i < p - E_dsize; i++)
result.push_back(cardinality[i]);
for (iter = E_dash.begin(); iter != E_dash.end(); iter++)
result.push_back(hyperEdge[*iter - 1]);
return result;
}
void newOutput(LL counter, char* outputFile) {
char str[20] = "output/";
sprintf(str + 7, "%lld_%lld.txt", counter / 5, counter % 5);
strcpy(outputFile, str);
}
__device__ int getIndex(){
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int line = blockDim.x * gridDim.x;
return row * line + col;
}
// __device__ void nd_clearVisit(bool *vis, LL tot, int index){
// memset(vis + index * tot, false, sizeof(bool) * tot);
// }
//
// __device__ bool nd_isVisited(bool* vis, LL nd, LL tot, int index){
// return vis[index * tot + nd - 1];
// }
//
// __device__ bool nd_setVisited(bool* vis, LL nd, LL tot, int index){
// bool oldState = vis[index * tot + nd - 1];
// vis[index * tot + nd - 1] = true;
// return oldState;
// }
__device__ void ndset_clear_global(LL *nodeSet, LL *nodeCount, int index){
nodeSet[index * NODESETSIZE] = 0;
nodeCount[index] = 0;
}
__device__ void ndset_clear(LL* nodeSet, LL *nodeCount, int index){
nodeSet[index * NODESETSIZE] = nodeCount[index];
}
__device__ bool ndset_insert(LL* nodeSet, LL value, int index){
LL cnt = ++nodeSet[index * NODESETSIZE];
if(cnt >= NODESETSIZE)
return false;
nodeSet[index * NODESETSIZE + cnt] = value;
return true;
}
__device__ int ndset_size_global(LL *nodeSet, int index){
return nodeSet[index * NODESETSIZE];
}
__device__ int ndset_size(LL* nodeSet, LL *nodeCount, int index){
return nodeSet[index * NODESETSIZE] - nodeCount[index];
}
__device__ void ndset_erase(LL* nodeSet, LL *nodeCount, int index){
if(ndset_size(nodeSet, nodeCount, index) > 0)
nodeSet[index * NODESETSIZE]--;
}
__device__ bool ndset_new(LL *nodeSet, LL *nodeCount, int index){
if(!ndset_insert(nodeSet, 0, index))
return false;
nodeCount[index] = ndset_size_global(nodeSet, index);
return true;
}
__global__ void setupRandGenerator(float* randSeed, curandState* state){
int index = getIndex();
unsigned long seed = (unsigned long)(randSeed[index] * RAND_FACTOR);
curand_init(seed, index, 0, &state[index]);
}
__global__ void reverseInfluence(LL totalNodes,
LL totalEdges,
LL iter_left,
LL* d_k,
LL* d_adjCount,
LL* d_adjList,
LL* d_nodeSet,
LL* d_nodeCount,
LL source, LL sink,
curandState *state){
int index = getIndex();
LL startNode, outdegree, nextNode;
LL cur_k;
float probability;
bool neighbor = false, overflow = false;
curandState localState = state[index];
ndset_clear_global(d_nodeSet, d_nodeCount, index);
bool vis[10000];
while(!overflow){
cur_k = atomicAdd((unsigned long long*)d_k, 1ULL);
if(iter_left <= cur_k){
atomicAdd((unsigned long long*)d_k, (unsigned long long)-1); // 64-bit decrement (there is no 64-bit atomicSub)
break;
}
neighbor = false;
startNode = source;
memset(vis, false, sizeof(vis));
ndset_insert(d_nodeSet, startNode, index);
vis[startNode] = true;
while (true) {
outdegree = d_adjCount[startNode] - d_adjCount[startNode - 1];
probability = curand_uniform(&localState) * outdegree;
nextNode = floor(probability);
nextNode += d_adjCount[startNode - 1];
if (nextNode >= d_adjCount[startNode]) {
// printf("%lld >= %lld - choose no point - terminate\n", nextNode, d_adjCount[startNode]);
ndset_clear(d_nodeSet, d_nodeCount, index);
break;
}
// printf("%lld < %lld - choose point\n", nextNode, d_adjCount[startNode]);
for (LL j = d_adjCount[startNode - 1]; j < d_adjCount[startNode]; j++) {
if (d_adjList[j] == sink) {
// printf("reach sink neighbor - terminate\n");
ndset_erase(d_nodeSet, d_nodeCount, index);
neighbor = true;
break;
}
}
if (neighbor)
break;
startNode = d_adjList[nextNode];
if(vis[startNode]){
// printf("visited - terminate\n");
ndset_clear(d_nodeSet, d_nodeCount, index);
break;
}
vis[startNode] = true;
if(!ndset_insert(d_nodeSet, startNode, index)){
// printf("overflow - terminate\n");
overflow = true;
break;
}
}
if(overflow){
ndset_clear(d_nodeSet, d_nodeCount, index);
break;
}
if(ndset_size(d_nodeSet, d_nodeCount, index) == 0)
continue;
if(!ndset_new(d_nodeSet, d_nodeCount, index))
break;
}
state[index] = localState;
}
int main(int argc, char** argv) {
LL source = 0, sink = 0, lines;
string filePath;
LL p, q, k = 1000;
// char outputFile[20];
long startTime;
LL totalNodes = 0, totalEdges = 0;
LL* h_adjCount = NULL, *h_adjList = NULL, *h_nodeSet = NULL;
cout << "Choose dataset: ";
cout.flush();
// cin >> filePath;
filePath = "../../data/wiki/wiki.txt";
rmp = readGraph(filePath.c_str(), h_adjList, h_adjCount, totalNodes, totalEdges, mp, true);
cout << "Choose input file: ";
cout.flush();
// cin >> filePath;
filePath = "../../data/wiki/input.txt";
FILE* fd = fopen(filePath.c_str(), "r");
cout << "How many lines: ";
cout.flush();
// cin >> lines;
lines = 1;
printf("========= NEW RUN\n");
printf("This graph contains %lld nodes connected by %lld edges\n", totalNodes, totalEdges);
printf("Running on %d threads\n\n", THREAD);
float alpha, beta, pmax;
float kmax, dif;
LL counter = 0, loop, iters, iter_left;
srand(time(NULL));
vector<_HyperEdge> hyperEdge;
set<LL> nodeSet;
vector<_HyperEdge> E;
dim3 gridSize(3,3), blockSize(16,16);
dim3 testBlock(5,5);
float *d_randSeed;
curandState *d_randState;
curandGenerator_t curandGenerator;
cudaMalloc((void**)&d_randSeed, sizeof(float) * THREAD);
cudaMalloc((void**)&d_randState, sizeof(curandState) * THREAD);
curandCreateGenerator(&curandGenerator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(curandGenerator, time(NULL));
curandGenerateUniform(curandGenerator, d_randSeed, THREAD);
// setupRandGenerator<<<gridSize,blockSize>>>(d_randSeed, d_randState);
setupRandGenerator<<<1,testBlock>>>(d_randSeed, d_randState);
LL* d_adjCount = NULL, *d_adjList = NULL, *d_nodeSet = NULL, *d_k = NULL;
LL *d_nodeCount = NULL;
cudaMalloc((void**)&d_nodeSet, sizeof(LL) * THREAD * NODESETSIZE);
cudaMalloc((void**)&d_adjCount, sizeof(LL) * (totalNodes + 1));
cudaMalloc((void**)&d_adjList, sizeof(LL) * totalEdges);
cudaMalloc((void**)&d_nodeCount, sizeof(LL) * THREAD);
cudaMalloc((void**)&d_k, sizeof(LL));
cudaMemcpy(d_adjList, h_adjList, sizeof(LL)*totalEdges, cudaMemcpyHostToDevice);
cudaMemcpy(d_adjCount, h_adjCount, sizeof(LL)*(totalNodes+1),cudaMemcpyHostToDevice);
FILE* wfd = fopen("output.txt", "w");
// printf(" ln %%diff time per loop\n");
while (~fscanf(fd, "s %lld t %lld alpha %f L %lld pmax %f beta %f\n", &sink, &source, &alpha, &k, &pmax, &beta)) {
counter++;
// printf("#%4lld - ", counter);
// fflush(stdout);
kmax = k * pmax;
hyperEdge.clear();
sink = mp[sink];
source = mp[source];
startTime = clock();
iter_left = k;
cudaMemset(d_k, 0LL, sizeof(LL));
h_nodeSet = new LL[THREAD * NODESETSIZE];
loop = 0;
while(iter_left > 0){
// reverseInfluence<<<gridSize,blockSize>>>( totalNodes, totalEdges, k,
reverseInfluence<<<1,testBlock>>>( totalNodes, totalEdges, k,
d_k,
d_adjCount,
d_adjList,
d_nodeSet,
d_nodeCount,
source, sink,
d_randState );
cudaMemcpy(h_nodeSet, d_nodeSet, sizeof(LL)*THREAD*NODESETSIZE, cudaMemcpyDeviceToHost);
cudaMemcpy(&iters, d_k, sizeof(LL), cudaMemcpyDeviceToHost);
loop++;
iter_left = k - iters;
_HyperEdge item;
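// Decode the packed node sets: slot 0 of each thread segment holds the element count, and a 0
// entry marks the end of one walk, so each zero-separated run becomes one hyperedge.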
for(int i = 0;i < THREAD;i++){
item.v_size = 0;
LL offset = i * NODESETSIZE;
for(LL j = 1;j <= h_nodeSet[offset];j++){
if(h_nodeSet[offset + j] == 0){
hyperEdge.push_back(item);
item.v_size = 0;
continue;
}
item.vertex[item.v_size++] = h_nodeSet[offset + j];
if(j == h_nodeSet[offset] && h_nodeSet[offset + j] != 0){
hyperEdge.push_back(item);
item.v_size = 0;
}
// printf("%lld / %lld, hyperedge size: %ld\n", iter_left, k, hyperEdge.size());
// printf("%lld ", h_nodeSet[offset + j]);
}
// printf("\n");
}
printf("%lld round(%lld left), %ld hyperedges\n", iters, iter_left, hyperEdge.size());
// iter_left = 0;
}
delete[] h_nodeSet;
q = ((hyperEdge.size() / totalNodes) + hyperEdge.size()) / 2;
p = (LL)(beta * hyperEdge.size());
dif = kmax - hyperEdge.size();
if(dif < 0) dif = -dif;
printf("%.4f%% - \n", dif / kmax * 100);
fflush(stdout);
break;
nodeSet.clear();
E.clear();
if (hyperEdge.size() > 0)
E = mpu(totalNodes, (LL)hyperEdge.size(), p, q, hyperEdge);
for (LL i = 0; i < E.size(); i++)
nodeSet.insert(E[i].vertex, E[i].vertex + E[i].v_size);
startTime = clock() - startTime;
printf("%ld s %3ld ms - %lld s %3lld ms\n",
startTime / CLOCKS_PER_SEC,
startTime % CLOCKS_PER_SEC / 1000,
startTime / loop / CLOCKS_PER_SEC,
startTime / loop % CLOCKS_PER_SEC / 1000);
for (auto i = nodeSet.begin(); i != nodeSet.end(); i++)
fprintf(wfd, "%lld ", rmp[*i]);
fprintf(wfd, "\n");
if (lines > 0 && counter >= lines)
break;
}
fflush(wfd);
fclose(wfd);
fclose(fd);
cudaFree(d_randSeed);
cudaFree(d_randState);
cudaFree(d_nodeSet);
cudaFree(d_adjCount);
cudaFree(d_adjList);
cudaFree(d_nodeCount);
cudaFree(d_k);
delete[] h_adjCount;
delete[] h_adjList;
printf("\n========= FINISH\n");
return 0;
}
|
4df858157c0e91ebf35fb463ea210a2bdb003e7a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AddScalar.h"
#include <iostream>
#include <assert.h>
#include "GM.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addScalar(float a, float b, float* ptrDevSum);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddScalar::AddScalar(float a , float b , float* ptrSum) :
a(a),//
b(b), //
ptrSum(ptrSum)
{
this->sizeFloat = sizeof(float); // [bytes]
// MM
{
GM::malloc(&prtDevSum, sizeFloat);
}
}
AddScalar::~AddScalar(void)
{
//MM
{
GM::free(prtDevSum);
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddScalar::run()
{
// Grid: specify the number of threads
dim3 dg(1, 1, 1);
dim3 db(1, 1, 1);
assert(dg.x * dg.y * dg.z * db.x * db.y * db.z == 1); // a single thread is enough
hipLaunchKernelGGL(( addScalar), dim3(dg),dim3(db), 0, 0, a, b, prtDevSum); // asynchronous
//Device::synchronize(); // only needed for device-side printf
// MM (Device -> Host)
{
GM::memcpyDToH(ptrSum, prtDevSum, sizeFloat); // MM = implicit synchronization barrier
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
4df858157c0e91ebf35fb463ea210a2bdb003e7a.cu
|
#include "AddScalar.h"
#include <iostream>
#include <assert.h>
#include "GM.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addScalar(float a, float b, float* ptrDevSum);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddScalar::AddScalar(float a , float b , float* ptrSum) :
a(a),//
b(b), //
ptrSum(ptrSum)
{
this->sizeFloat = sizeof(float); // [bytes]
// MM
{
GM::malloc(&prtDevSum, sizeFloat);
}
}
AddScalar::~AddScalar(void)
{
//MM
{
GM::free(prtDevSum);
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddScalar::run()
{
// Grid: specify the number of threads
dim3 dg(1, 1, 1);
dim3 db(1, 1, 1);
assert(dg.x * dg.y * dg.z * db.x * db.y * db.z == 1); // a single thread is enough
addScalar<<<dg,db>>>(a, b, prtDevSum); // asynchronous
//Device::synchronize(); // only needed for device-side printf
// MM (Device -> Host)
{
GM::memcpyDToH(ptrSum, prtDevSum, sizeFloat); // MM = implicit synchronization barrier
}
}
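// Typical usage (an assumed sketch; AddScalar.h and GM are not shown here):
//
//   float sum = 0.0f;
//   AddScalar adder(1.5f, 2.5f, &sum); // allocates the device result via GM::malloc
//   adder.run();                       // launches addScalar<<<1,1>>> and copies the result back
//   // sum now holds a + b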
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
mmm_shared.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Minimal CUDA program, intended just to test ability
to compile and run a CUDA program
nvcc cuda_test.cu -o cuda_test
You need to follow instructions provided elsewhere, such as in the
"SCC-for-EC527" slides, both of the SCC_Cheatsheet PDFs, and
SCC_Getting_Started PDFs, to get onto the system where you can
compile and run this.
To understand the program, of course you should read the lecture notes
(slides) that have "GPU" in the name.
*/
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include "cuPrintf.hip"
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), (char *)__FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA_SAFE_CALL: %s %s %d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//#define NUM_THREADS_PER_BLOCK 256
#ifndef NUM_BLOCKS
#define NUM_BLOCKS 16
#endif
#define PRINT_TIME 1
#define SM_ARR_LEN 50000
#define TOL 1e-6
#ifndef ARR_SIZE
#define ARR_SIZE 192
#endif
//#define ITERS 2000
#define IMUL(a, b) __mul24(a, b)
float getChecksum(float* C, int length) {
int i, j;
float sum = 0;
for (i = 0; i < length; i++) {
for (j = 0; j < length; j++) {
sum += C[i*length+j];
}
}
return sum;
}
void printMat(float* A, int length) {
int i, j;
//data_t sum;
//long int length = get_matrix_rowlen(A);
//data_t *a0 = get_matrix_start(A);
for (i = 0; i < length; i++) {
for (j = 0; j < length; j++) {
fprintf(stderr, "%05f\t", A[i*length+j]);
}
fprintf(stderr, "\n");
}
}
// Matrix multiplication on the (CPU)
// host in double precision
void MatrixMulOnHost(float* M, float* N, float* P, int Width) {
for (int i = 0; i < Width; ++i)
for (int j = 0; j < Width; ++j) {
float sum = 0;
for (int k = 0; k < Width; ++k) {
float a = M[i * Width + k];
float b = N[k * Width + j];
sum += a * b;
}
P[i * Width + j] = sum;
}
}
//blocking matrix multiply
void MatrixMulOnHostBlocked(float* A, float* B, float* C, int Width) {
int i, j, k, kk, jj;
float sum;
int bsize = 8;
int en = bsize * (Width/bsize); // Amount that fits evenly into blocks
for (kk = 0; kk < en; kk += bsize) {
for (jj = 0; jj < en; jj += bsize) {
for (i = 0; i < Width; i++) {
for (j = jj; j < jj + bsize; j++) {
sum = C[i*Width+j];
for (k = kk; k < kk + bsize; k++) {
sum += A[i*Width+k]*B[k*Width+j];
}
C[i*Width+j] = sum;
}
}
}
}
}
void initializeArrayRand2D(float *arr, int len, int seed);
void initializeArrayOrdered2D(float *arr, int len);
// Matrix multiplication kernel
// per thread code
__global__ void MatrixMulKernelGlobal(float* Md , float* Nd , float* Pd, int Width) {
// Pvalueis used to store the element of the
// matrix that is computed by the thread
//cuPrintf("%f\n", Md[threadIdx.y*Width+threadIdx.x]);
int Row = blockIdx.y*(ARR_SIZE/NUM_BLOCKS) + threadIdx.y;
int Col = blockIdx.x*(ARR_SIZE/NUM_BLOCKS) + threadIdx.x;
float Pvalue = 0;
for (int k = 0; k < Width; ++k) {
// float Melement = Md [threadIdx.y*Width+k];
// float Nelement = Nd [k*Width+threadIdx.x];
Pvalue += Md[Row*Width+k] * Nd[k*Width+Col];
}
//cuPrintf("%f\n", Pvalue);
Pd[Row*Width+Col] = Pvalue;
}
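// MatrixMulKernelShared below uses the classic tiled scheme: each block stages one
// tile_width x tile_width tile of Md and Nd in shared memory, synchronizes, accumulates the
// partial dot products for its output tile, then slides to the next pair of tiles. Correctness
// assumes Width (== ARR_SIZE here) is an exact multiple of tile_width = ARR_SIZE / NUM_BLOCKS.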
__global__ void MatrixMulKernelShared(float* Md, float* Nd, float* Pd, int Width) {
const int tile_width = ARR_SIZE/NUM_BLOCKS;
__shared__ float Mds[tile_width][tile_width]; // Shared memory
__shared__ float Nds[tile_width][tile_width]; // declarations
int bx = blockIdx.x; int by = blockIdx.y; // ID thread
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * tile_width + ty;
int Col = bx * tile_width + tx;
float Pvalue = 0; // REGISTER!
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/tile_width; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*tile_width + tx)];
Nds[ty][tx] = Nd[Col + (m*tile_width + ty)*Width];
__syncthreads();
for (int k = 0; k < tile_width; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
//from https://github.com/tpn/cuda-samples/blob/master/v8.0/0_Simple/matrixMul_nvrtc/matrixMul_kernel.cu
__global__ void matrixMulCUDA(float *C, float *A, float *B, int width)
{
const int BLOCK_SIZE = ARR_SIZE/NUM_BLOCKS;
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = width * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + width - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * width;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + width * ty + tx];
Bs[ty][tx] = B[b + width * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = width * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + width * ty + tx] = Csub;
}
int main(int argc, char **argv){
// GPU Timing variables
hipEvent_t startOuter, startInner, /*startSerial,*/ stopOuter, stopInner/*, stopSerial*/;
float elapsed_gpu_outer, elapsed_gpu_inner/*, elapsed_serial*/;
// Arrays on GPU global memory
float *d_x;
float *d_y;
float *d_result;
// Arrays on the host memory
float *h_x;
float *h_y;
float *h_result;
//float *h_serial;
//int i, j, errCount = 0, zeroCount = 0;
/*
if (argc > 1) {
ARR_SIZE = atoi(argv[1]);
}
else {
ARR_SIZE = SM_ARR_LEN;
}*/
fprintf(stderr, "Length of the array = %d\n", ARR_SIZE);
// Select GPU
CUDA_SAFE_CALL(hipSetDevice(0));
// Allocate GPU memory
size_t allocSize = ARR_SIZE * ARR_SIZE * sizeof(float);
CUDA_SAFE_CALL(hipMalloc((void **)&d_x, allocSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_y, allocSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_result, allocSize));
// Allocate arrays on host memory
h_x = (float *) malloc(allocSize);
h_y = (float *) malloc(allocSize);
h_result = (float *) malloc(allocSize);
//h_serial = (float *) malloc(allocSize);
// Initialize the host arrays
fprintf(stderr, "\nInitializing the arrays ...");
// Arrays are initialized with deterministic values for reproducibility
initializeArrayOrdered2D(h_x, ARR_SIZE);
initializeArrayOrdered2D(h_y, ARR_SIZE);
//initializeArray1D(h_y, ARR_SIZE, 1467);
fprintf(stderr, "\t... done\n");
fprintf(stderr, "Creating cuda events ...");
#if PRINT_TIME
// Create the cuda events
hipEventCreate(&startOuter);
hipEventCreate(&startInner);
hipEventCreate(&stopOuter);
hipEventCreate(&stopInner);
// Record event on the default stream
hipEventRecord(startOuter, 0);
#endif
//fprintf(stderr, "\t... done\n");
//fprintf(stderr, "Transferring arrays to GPU memory ...");
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(hipMemcpy(d_x, h_x, allocSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_y, h_y, allocSize, hipMemcpyHostToDevice));
dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS);
dim3 dimBlock(ARR_SIZE/NUM_BLOCKS, ARR_SIZE/NUM_BLOCKS);
//fprintf(stderr, "\t... done\n");
// printf("Launching kernel");
// Launch the kernel
//cudaPrintfInit();
//fprintf(stderr, "Kernel initialized\n");
#if PRINT_TIME
hipEventRecord(startInner, 0);
#endif
hipLaunchKernelGGL(( MatrixMulKernelShared), dim3(dimGrid), dim3(dimBlock), 0, 0, d_x, d_y, d_result, ARR_SIZE);
//matrixMulCUDA<<<dimGrid, dimBlock>>>(d_x, d_y, d_result, ARR_SIZE);
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
// Check for errors during launch
CUDA_SAFE_CALL(hipPeekAtLastError());
// Transfer the results back to the host
CUDA_SAFE_CALL(hipMemcpy(h_result, d_result, allocSize, hipMemcpyDeviceToHost));
#if PRINT_TIME
// Stop and destroy the timer
hipEventRecord(stopOuter,0);
hipEventRecord(stopInner,0);
hipEventSynchronize(stopOuter);
hipEventSynchronize(stopInner);
hipEventElapsedTime(&elapsed_gpu_outer, startOuter, stopOuter);
hipEventElapsedTime(&elapsed_gpu_inner, startInner, stopInner);
//printf("\nGPU time (start-to-finish): %f (msec)\n", elapsed_gpu_outer);
//printf("GPU time (kernel only): %f (msec)\n", elapsed_gpu_inner);
printf("%f", elapsed_gpu_outer/1000.0);
hipEventDestroy(startOuter);
hipEventDestroy(startInner);
hipEventDestroy(stopOuter);
hipEventDestroy(stopInner);
#endif
fprintf(stderr, "Grid size: %d\nBlock size: %d\n", NUM_BLOCKS, ARR_SIZE/NUM_BLOCKS);
float checksumGPU = 0;
//float checksumSerial = 0;
// get checksum
checksumGPU = getChecksum(h_result, ARR_SIZE);
if (ARR_SIZE <= 8) {
fprintf(stderr, "\n");
printMat(h_result, ARR_SIZE);
}
fprintf(stderr, "Checksum: %f\n", checksumGPU);
/*
hipEventCreate(&startSerial);
hipEventCreate(&stopSerial);
hipEventRecord(startSerial, 0);
MatrixMulOnHostBlocked(h_x, h_y, h_serial, ARR_SIZE);
hipEventRecord(stopSerial,0);
hipEventSynchronize(stopSerial);
hipEventElapsedTime(&elapsed_serial, startSerial, stopSerial);
printf("Blocked serial time : %f (msec)\n", elapsed_serial);
hipEventDestroy(startSerial);
hipEventDestroy(stopSerial);
checksumSerial = getChecksum(h_serial, ARR_SIZE);
printf("Serial checksum: %f\n", checksumSerial);
double maxDiff = 0.0;
double diff;
// Compare the results
for(i = 0; i < ARR_SIZE; i++) {
for(j = 0; j < ARR_SIZE; j++) {
diff = fabs(h_result[i*ARR_SIZE+j] - h_serial[i*ARR_SIZE+j]);
if (diff > maxDiff) {
//printf("%f - %f = %f", h_result[i*ARR_SIZE+j], h_serial[i*ARR_SIZE+j], diff);
maxDiff = diff;
}
}
}
printf("Maximum difference: %f\n", maxDiff);
*/
/*
for(i = 0; i < 50; i++) {
printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
}
*/
/*
if (errCount > 0) {
fprintf(stderr, "\n@ERROR: TEST FAILED: %d results did not matched\n", errCount);
}
else if (zeroCount > 0){
fprintf(stderr, "\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
}
else {
fprintf(stderr, "\nTEST PASSED: All results matched\n");
}
*/
// Free-up device and host memory
CUDA_SAFE_CALL(hipFree(d_x));
CUDA_SAFE_CALL(hipFree(d_y));
CUDA_SAFE_CALL(hipFree(d_result));
free(h_x);
free(h_y);
free(h_result);
//free(h_serial);
return 0;
}
void initializeArrayRand2D(float *arr, int len, int seed) {
int i, j;
float randNum;
srand(seed);
for (i = 0; i < len; i++) {
for (j = 0; j < len; j++) {
randNum = (float) rand() / (float)(RAND_MAX/len);
//printf("%f\n", randNum);
arr[i * len + j] = randNum;
}
}
}
void initializeArrayOrdered2D(float *arr, int len) {
long int i;
for (i = 0; i < len*len; i++) {
arr[i] = (float)i;
}
}
|
mmm_shared.cu
|
/*
Minimal CUDA program, intended just to test ability
to compile and run a CUDA program
nvcc cuda_test.cu -o cuda_test
You need to follow instructions provided elsewhere, such as in the
"SCC-for-EC527" slides, both of the SCC_Cheatsheet PDFs, and
SCC_Getting_Started PDFs, to get onto the system where you can
compile and run this.
To understand the program, of course you should read the lecture notes
(slides) that have "GPU" in the name.
*/
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include "cuPrintf.cu"
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), (char *)__FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA_SAFE_CALL: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//#define NUM_THREADS_PER_BLOCK 256
#ifndef NUM_BLOCKS
#define NUM_BLOCKS 16
#endif
#define PRINT_TIME 1
#define SM_ARR_LEN 50000
#define TOL 1e-6
#ifndef ARR_SIZE
#define ARR_SIZE 192
#endif
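// ARR_SIZE and NUM_BLOCKS can be overridden at compile time; a plausible invocation
// (assumed, not taken from the original build scripts) is:
//   nvcc -DARR_SIZE=512 -DNUM_BLOCKS=16 mmm_shared.cu -o mmm_shared
// Keep ARR_SIZE/NUM_BLOCKS <= 32 so each block stays within the 1024 threads-per-block limit.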
//#define ITERS 2000
#define IMUL(a, b) __mul24(a, b)
float getChecksum(float* C, int length) {
int i, j;
float sum = 0;
for (i = 0; i < length; i++) {
for (j = 0; j < length; j++) {
sum += C[i*length+j];
}
}
return sum;
}
void printMat(float* A, int length) {
int i, j;
//data_t sum;
//long int length = get_matrix_rowlen(A);
//data_t *a0 = get_matrix_start(A);
for (i = 0; i < length; i++) {
for (j = 0; j < length; j++) {
fprintf(stderr, "%05f\t", A[i*length+j]);
}
fprintf(stderr, "\n");
}
}
// Matrix multiplication on the (CPU)
// host in double precision
void MatrixMulOnHost(float* M, float* N, float* P, int Width) {
for (int i = 0; i < Width; ++i)
for (int j = 0; j < Width; ++j) {
float sum = 0;
for (int k = 0; k < Width; ++k) {
float a = M[i * Width + k];
float b = N[k * Width + j];
sum += a * b;
}
P[i * Width + j] = sum;
}
}
//blocking matrix multiply
void MatrixMulOnHostBlocked(float* A, float* B, float* C, int Width) {
int i, j, k, kk, jj;
float sum;
int bsize = 8;
int en = bsize * (Width/bsize); // Amount that fits evenly into blocks
for (kk = 0; kk < en; kk += bsize) {
for (jj = 0; jj < en; jj += bsize) {
for (i = 0; i < Width; i++) {
for (j = jj; j < jj + bsize; j++) {
sum = C[i*Width+j];
for (k = kk; k < kk + bsize; k++) {
sum += A[i*Width+k]*B[k*Width+j];
}
C[i*Width+j] = sum;
}
}
}
}
}
void initializeArrayRand2D(float *arr, int len, int seed);
void initializeArrayOrdered2D(float *arr, int len);
// Matrix multiplication kernel
// per thread code
__global__ void MatrixMulKernelGlobal(float* Md , float* Nd , float* Pd, int Width) {
// Pvalueis used to store the element of the
// matrix that is computed by the thread
//cuPrintf("%f\n", Md[threadIdx.y*Width+threadIdx.x]);
int Row = blockIdx.y*(ARR_SIZE/NUM_BLOCKS) + threadIdx.y;
int Col = blockIdx.x*(ARR_SIZE/NUM_BLOCKS) + threadIdx.x;
float Pvalue = 0;
for (int k = 0; k < Width; ++k) {
// float Melement = Md [threadIdx.y*Width+k];
// float Nelement = Nd [k*Width+threadIdx.x];
Pvalue += Md[Row*Width+k] * Nd[k*Width+Col];
}
//cuPrintf("%f\n", Pvalue);
Pd[Row*Width+Col] = Pvalue;
}
__global__ void MatrixMulKernelShared(float* Md, float* Nd, float* Pd, int Width) {
const int tile_width = ARR_SIZE/NUM_BLOCKS;
__shared__ float Mds[tile_width][tile_width]; // Shared memory
__shared__ float Nds[tile_width][tile_width]; // declarations
int bx = blockIdx.x; int by = blockIdx.y; // ID thread
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * tile_width + ty;
int Col = bx * tile_width + tx;
float Pvalue = 0; // REGISTER!
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/tile_width; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*tile_width + tx)];
Nds[ty][tx] = Nd[Col + (m*tile_width + ty)*Width];
__syncthreads();
for (int k = 0; k < tile_width; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
//from https://github.com/tpn/cuda-samples/blob/master/v8.0/0_Simple/matrixMul_nvrtc/matrixMul_kernel.cu
__global__ void matrixMulCUDA(float *C, float *A, float *B, int width)
{
const int BLOCK_SIZE = ARR_SIZE/NUM_BLOCKS;
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = width * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + width - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * width;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + width * ty + tx];
Bs[ty][tx] = B[b + width * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = width * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + width * ty + tx] = Csub;
}
int main(int argc, char **argv){
// GPU Timing variables
cudaEvent_t startOuter, startInner, /*startSerial,*/ stopOuter, stopInner/*, stopSerial*/;
float elapsed_gpu_outer, elapsed_gpu_inner/*, elapsed_serial*/;
// Arrays on GPU global memory
float *d_x;
float *d_y;
float *d_result;
// Arrays on the host memory
float *h_x;
float *h_y;
float *h_result;
//float *h_serial;
//int i, j, errCount = 0, zeroCount = 0;
/*
if (argc > 1) {
ARR_SIZE = atoi(argv[1]);
}
else {
ARR_SIZE = SM_ARR_LEN;
}*/
fprintf(stderr, "Length of the array = %d\n", ARR_SIZE);
// Select GPU
CUDA_SAFE_CALL(cudaSetDevice(0));
// Allocate GPU memory
size_t allocSize = ARR_SIZE * ARR_SIZE * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_y, allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, allocSize));
// Allocate arrays on host memory
h_x = (float *) malloc(allocSize);
h_y = (float *) malloc(allocSize);
h_result = (float *) malloc(allocSize);
//h_serial = (float *) malloc(allocSize);
// Initialize the host arrays
fprintf(stderr, "\nInitializing the arrays ...");
// Arrays are initialized with deterministic values for reproducibility
initializeArrayOrdered2D(h_x, ARR_SIZE);
initializeArrayOrdered2D(h_y, ARR_SIZE);
//initializeArray1D(h_y, ARR_SIZE, 1467);
fprintf(stderr, "\t... done\n");
fprintf(stderr, "Creating cuda events ...");
#if PRINT_TIME
// Create the cuda events
cudaEventCreate(&startOuter);
cudaEventCreate(&startInner);
cudaEventCreate(&stopOuter);
cudaEventCreate(&stopInner);
// Record event on the default stream
cudaEventRecord(startOuter, 0);
#endif
//fprintf(stderr, "\t... done\n");
//fprintf(stderr, "Transferring arrays to GPU memory ...");
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(cudaMemcpy(d_x, h_x, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, allocSize, cudaMemcpyHostToDevice));
dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS);
dim3 dimBlock(ARR_SIZE/NUM_BLOCKS, ARR_SIZE/NUM_BLOCKS);
//fprintf(stderr, "\t... done\n");
// printf("Launching kernel");
// Launch the kernel
//cudaPrintfInit();
//fprintf(stderr, "Kernel initialized\n");
#if PRINT_TIME
cudaEventRecord(startInner, 0);
#endif
MatrixMulKernelShared<<<dimGrid, dimBlock>>>(d_x, d_y, d_result, ARR_SIZE);
//matrixMulCUDA<<<dimGrid, dimBlock>>>(d_x, d_y, d_result, ARR_SIZE);
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
// Check for errors during launch
CUDA_SAFE_CALL(cudaPeekAtLastError());
// Transfer the results back to the host
CUDA_SAFE_CALL(cudaMemcpy(h_result, d_result, allocSize, cudaMemcpyDeviceToHost));
#if PRINT_TIME
// Stop and destroy the timer
cudaEventRecord(stopOuter,0);
cudaEventRecord(stopInner,0);
cudaEventSynchronize(stopOuter);
cudaEventSynchronize(stopInner);
cudaEventElapsedTime(&elapsed_gpu_outer, startOuter, stopOuter);
cudaEventElapsedTime(&elapsed_gpu_inner, startInner, stopInner);
//printf("\nGPU time (start-to-finish): %f (msec)\n", elapsed_gpu_outer);
//printf("GPU time (kernel only): %f (msec)\n", elapsed_gpu_inner);
printf("%f", elapsed_gpu_outer/1000.0);
cudaEventDestroy(startOuter);
cudaEventDestroy(startInner);
cudaEventDestroy(stopOuter);
cudaEventDestroy(stopInner);
#endif
fprintf(stderr, "Grid size: %d\nBlock size: %d\n", NUM_BLOCKS, ARR_SIZE/NUM_BLOCKS);
float checksumGPU = 0;
//float checksumSerial = 0;
// get checksum
checksumGPU = getChecksum(h_result, ARR_SIZE);
if (ARR_SIZE <= 8) {
fprintf(stderr, "\n");
printMat(h_result, ARR_SIZE);
}
fprintf(stderr, "Checksum: %f\n", checksumGPU);
/*
cudaEventCreate(&startSerial);
cudaEventCreate(&stopSerial);
cudaEventRecord(startSerial, 0);
MatrixMulOnHostBlocked(h_x, h_y, h_serial, ARR_SIZE);
cudaEventRecord(stopSerial,0);
cudaEventSynchronize(stopSerial);
cudaEventElapsedTime(&elapsed_serial, startSerial, stopSerial);
printf("Blocked serial time : %f (msec)\n", elapsed_serial);
cudaEventDestroy(startSerial);
cudaEventDestroy(stopSerial);
checksumSerial = getChecksum(h_serial, ARR_SIZE);
printf("Serial checksum: %f\n", checksumSerial);
double maxDiff = 0.0;
double diff;
// Compare the results
for(i = 0; i < ARR_SIZE; i++) {
for(j = 0; j < ARR_SIZE; j++) {
diff = fabs(h_result[i*ARR_SIZE+j] - h_serial[i*ARR_SIZE+j]);
if (diff > maxDiff) {
//printf("%f - %f = %f", h_result[i*ARR_SIZE+j], h_serial[i*ARR_SIZE+j], diff);
maxDiff = diff;
}
}
}
printf("Maximum difference: %f\n", maxDiff);
*/
/*
for(i = 0; i < 50; i++) {
printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
}
*/
/*
if (errCount > 0) {
fprintf(stderr, "\n@ERROR: TEST FAILED: %d results did not matched\n", errCount);
}
else if (zeroCount > 0){
fprintf(stderr, "\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
}
else {
fprintf(stderr, "\nTEST PASSED: All results matched\n");
}
*/
// Free-up device and host memory
CUDA_SAFE_CALL(cudaFree(d_x));
CUDA_SAFE_CALL(cudaFree(d_y));
CUDA_SAFE_CALL(cudaFree(d_result));
free(h_x);
free(h_y);
free(h_result);
//free(h_serial);
return 0;
}
void initializeArrayRand2D(float *arr, int len, int seed) {
int i, j;
float randNum;
srand(seed);
for (i = 0; i < len; i++) {
for (j = 0; j < len; j++) {
randNum = (float) rand() / (float)(RAND_MAX/len);
//printf("%f\n", randNum);
arr[i * len + j] = randNum;
}
}
}
void initializeArrayOrdered2D(float *arr, int len) {
long int i;
for (i = 0; i < len*len; i++) {
arr[i] = (float)i;
}
}
|
870a1657f1c3902c7d6c9512867fbf138bfa335b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "optixtutorial.h"
struct IntersectionInfo
{
optix::float3 normal;
optix::float2 texcoord;
optix::float3 intersectionPoint;
optix::float3 vectorToLight;
};
rtBuffer<optix::float3, 1> normal_buffer;
rtBuffer<optix::float2, 1> texcoord_buffer;
rtBuffer<optix::uchar4, 2> output_buffer;
rtDeclareVariable( optix::float3, diffuse, , "diffuse" );
rtDeclareVariable(optix::float3, specular, , "specular");
rtDeclareVariable(optix::float3, ambient, , "ambient");
rtDeclareVariable(float, shininess, , "shininess");
rtDeclareVariable(int, tex_diffuse_id, , "diffuse texture id");
rtDeclareVariable( rtObject, top_object, , );
rtDeclareVariable( uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable( uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable( PerRayData_radiance, ray_data, rtPayload, );
rtDeclareVariable( PerRayData_shadow, shadow_ray_data, rtPayload, );
rtDeclareVariable( float2, barycentrics, attribute rtTriangleBarycentrics, );
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, "current ray");
rtDeclareVariable(IntersectionInfo, hitInfo, attribute attributes, "Intersection info");
rtDeclareVariable(optix::float3, view_from, , );
rtDeclareVariable(optix::Matrix3x3, M_c_w, , "camera to worldspace transformation matrix" );
rtDeclareVariable(float, focal_length, , "focal length in pixels" );
RT_PROGRAM void attribute_program( void )
{
const optix::float3 lightPossition = optix::make_float3(50, 0, 120);
const optix::float2 barycentrics = rtGetTriangleBarycentrics();
const unsigned int index = rtGetPrimitiveIndex();
const optix::float3 n0 = normal_buffer[index * 3 + 0];
const optix::float3 n1 = normal_buffer[index * 3 + 1];
const optix::float3 n2 = normal_buffer[index * 3 + 2];
const optix::float2 t0 = texcoord_buffer[index * 3 + 0];
const optix::float2 t1 = texcoord_buffer[index * 3 + 1];
const optix::float2 t2 = texcoord_buffer[index * 3 + 2];
hitInfo.normal = optix::normalize(n1 * barycentrics.x + n2 * barycentrics.y + n0 * (1.0f - barycentrics.x - barycentrics.y));
hitInfo.texcoord = t1 * barycentrics.x + t2 * barycentrics.y + t0 * (1.0f - barycentrics.x - barycentrics.y);
if (optix::dot(ray.direction, hitInfo.normal) > 0) {
hitInfo.normal *= -1;
}
hitInfo.intersectionPoint = optix::make_float3(ray.origin.x + ray.tmax * ray.direction.x,
ray.origin.y + ray.tmax * ray.direction.y,
ray.origin.z + ray.tmax * ray.direction.z);
hitInfo.vectorToLight = optix::normalize(lightPossition - hitInfo.intersectionPoint);
}
RT_PROGRAM void primary_ray( void )
{
PerRayData_radiance prd;
hiprandState_t state;
prd.state = &state;
hiprand_init(launch_index.x + launch_dim.x * launch_index.y, 0, 0, prd.state);
int ANTI_ALIASING_SAMPLES = 8;
int NO_SAMPLES = 8;
optix::float3 resultColor = optix::make_float3(0.0f, 0.0f, 0.0f);
for (int i = 0; i < ANTI_ALIASING_SAMPLES; i++)
{
float randomX = hiprand_uniform(prd.state);
float randomY = hiprand_uniform(prd.state);
const optix::float3 d_c = make_float3(launch_index.x - launch_dim.x * 0.5f + randomX,
output_buffer.size().y * 0.5f - launch_index.y + randomY,
-focal_length);
const optix::float3 d_w = optix::normalize(M_c_w * d_c);
optix::Ray ray(view_from, d_w, 0, 0.01f);
optix::float3 ambientColor = optix::make_float3(0.0f, 0.0f, 0.0f);
for (int j = 0; j < NO_SAMPLES; j++) {
rtTrace(top_object, ray, prd);
ambientColor += prd.result;
}
ambientColor /= NO_SAMPLES;
resultColor += ambientColor;
}
resultColor /= ANTI_ALIASING_SAMPLES;
output_buffer[launch_index] = optix::make_uchar4(resultColor.x*255.0f, resultColor.y*255.0f, resultColor.z*255.0f, 255 );
}
RT_PROGRAM void closest_hit_normal_shader( void )
{
optix::float3 normal = hitInfo.normal;
ray_data.result = optix::make_float3((normal.x + 1) / 2, (normal.y + 1) / 2, (normal.z + 1) / 2);
}
RT_PROGRAM void closest_hit_lambert_shader(void)
{
float normalLigthScalarProduct = optix::dot(hitInfo.vectorToLight, hitInfo.normal);
ray_data.result = getDiffuseColor() * normalLigthScalarProduct * getAmbientColor();
}
RT_PROGRAM void closest_hit_phong_shader(void)
{
float normalLigthScalarProduct = optix::dot(hitInfo.vectorToLight, hitInfo.normal);
optix::float3 lr = 2 * (normalLigthScalarProduct)* hitInfo.normal - hitInfo.vectorToLight;
//optix::float3 reflectedColor = reflect();
ray_data.result = ambient + (getDiffuseColor() * normalLigthScalarProduct) + specular * pow(/*reflectedColor.x **/ optix::clamp(optix::dot(-ray.direction, lr), 0.0f, 1.0f), shininess);
ray_data.result = ray_data.result * getAmbientColor();
}
RT_PROGRAM void closest_hit_glass_shader(void)
{
}
RT_PROGRAM void closest_hit_pbr_shader(void)
{
}
RT_PROGRAM void closest_hit_mirror_shader(void)
{
}
RT_PROGRAM void any_hit(void)
{
shadow_ray_data.visible.x = 0;
rtTerminateRay();
}
RT_PROGRAM void miss_program( void )
{
ray_data.result = optix::make_float3( 0.0f, 0.0f, 0.0f );
}
RT_PROGRAM void exception( void )
{
const unsigned int code = rtGetExceptionCode();
rtPrintf( "Exception 0x%X at (%d, %d)\n", code, launch_index.x, launch_index.y );
rtPrintExceptionDetails();
output_buffer[launch_index] = uchar4{ 255, 0, 255, 0 };
}
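// sampleHemisphere below draws a cosine-weighted direction around the given normal: (x, y, z) is
// a cosine-distributed sample in the local frame (z along the normal), which is then rotated into
// world space with a basis built from the normal; the returned pdf is dot(normal, omega_i) / pi.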
__device__ optix::float3 sampleHemisphere(optix::float3 normal, hiprandState_t* state, float& pdf) {
float randomU = hiprand_uniform(state);
float randomV = hiprand_uniform(state);
float x = cosf(2 * CUDART_PI_F * randomU) * sqrtf(1 - randomV);
float y = sinf(2 * CUDART_PI_F * randomU) * sqrtf(1 - randomV);
float z = sqrtf(randomV);
optix::float3 O1 = optix::normalize(orthogonal(normal));
optix::float3 O2 = optix::normalize(optix::cross(normal, O1));
optix::Matrix3x3 transformationMatrix = optix::make_matrix3x3(optix::Matrix<4, 4>::fromBasis(O1, O2, normal, optix::make_float3(0.0f, 0.0f, 0.0f) ));
optix::float3 omegai = optix::make_float3(x, y, z);
omegai = optix::normalize(transformationMatrix * omegai);
pdf = optix::dot(normal, omegai) / CUDART_PI_F;
return omegai;
}
__device__ optix::float3 orthogonal(const optix::float3 & v)
{
return (abs(v.x) > abs(v.z)) ? optix::make_float3(-v.y, v.x, 0.0f) : optix::make_float3(0.0f, -v.z, v.y);
}
__device__ optix::float3 getAmbientColor()
{
float pdf = 0;
optix::float3 omegai = sampleHemisphere(hitInfo.normal, ray_data.state, pdf);
optix::Ray ray(hitInfo.intersectionPoint, omegai, 1, 0.01f);
PerRayData_shadow shadow_ray;
shadow_ray.visible.x = 1;
rtTrace(top_object, ray, shadow_ray);
optix::float3 whiteColor = optix::make_float3(1, 1, 1);
return whiteColor * optix::dot(hitInfo.normal, omegai) * shadow_ray.visible.x / CUDART_PI_F / pdf;
}
__device__ optix::float3 getDiffuseColor()
{
optix::float3 color;
if (tex_diffuse_id != -1) {
const optix::float4 value = optix::rtTex2D<optix::float4>(tex_diffuse_id, hitInfo.texcoord.x, 1 - hitInfo.texcoord.y);
color = optix::make_float3(value.x, value.y, value.z);
}
else {
color = diffuse;
}
return color;
}
__device__ optix::float3 reflect()
{
PerRayData_radiance prd;
optix::float3 reflected = 2 * (hitInfo.normal * ray.direction) * hitInfo.normal - ray.direction;
optix::Ray ray(ray.origin, reflected, 0, 0.01f);
rtTrace(top_object, ray, prd);
return ray_data.result;
}
|
870a1657f1c3902c7d6c9512867fbf138bfa335b.cu
|
#include "optixtutorial.h"
struct IntersectionInfo
{
optix::float3 normal;
optix::float2 texcoord;
optix::float3 intersectionPoint;
optix::float3 vectorToLight;
};
rtBuffer<optix::float3, 1> normal_buffer;
rtBuffer<optix::float2, 1> texcoord_buffer;
rtBuffer<optix::uchar4, 2> output_buffer;
rtDeclareVariable( optix::float3, diffuse, , "diffuse" );
rtDeclareVariable(optix::float3, specular, , "specular");
rtDeclareVariable(optix::float3, ambient, , "ambient");
rtDeclareVariable(float, shininess, , "shininess");
rtDeclareVariable(int, tex_diffuse_id, , "diffuse texture id");
rtDeclareVariable( rtObject, top_object, , );
rtDeclareVariable( uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable( uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable( PerRayData_radiance, ray_data, rtPayload, );
rtDeclareVariable( PerRayData_shadow, shadow_ray_data, rtPayload, );
rtDeclareVariable( float2, barycentrics, attribute rtTriangleBarycentrics, );
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, "current ray");
rtDeclareVariable(IntersectionInfo, hitInfo, attribute attributes, "Intersection info");
rtDeclareVariable(optix::float3, view_from, , );
rtDeclareVariable(optix::Matrix3x3, M_c_w, , "camera to worldspace transformation matrix" );
rtDeclareVariable(float, focal_length, , "focal length in pixels" );
RT_PROGRAM void attribute_program( void )
{
const optix::float3 lightPossition = optix::make_float3(50, 0, 120);
const optix::float2 barycentrics = rtGetTriangleBarycentrics();
const unsigned int index = rtGetPrimitiveIndex();
const optix::float3 n0 = normal_buffer[index * 3 + 0];
const optix::float3 n1 = normal_buffer[index * 3 + 1];
const optix::float3 n2 = normal_buffer[index * 3 + 2];
const optix::float2 t0 = texcoord_buffer[index * 3 + 0];
const optix::float2 t1 = texcoord_buffer[index * 3 + 1];
const optix::float2 t2 = texcoord_buffer[index * 3 + 2];
hitInfo.normal = optix::normalize(n1 * barycentrics.x + n2 * barycentrics.y + n0 * (1.0f - barycentrics.x - barycentrics.y));
hitInfo.texcoord = t1 * barycentrics.x + t2 * barycentrics.y + t0 * (1.0f - barycentrics.x - barycentrics.y);
if (optix::dot(ray.direction, hitInfo.normal) > 0) {
hitInfo.normal *= -1;
}
hitInfo.intersectionPoint = optix::make_float3(ray.origin.x + ray.tmax * ray.direction.x,
ray.origin.y + ray.tmax * ray.direction.y,
ray.origin.z + ray.tmax * ray.direction.z);
hitInfo.vectorToLight = optix::normalize(lightPossition - hitInfo.intersectionPoint);
}
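// primary_ray below shoots ANTI_ALIASING_SAMPLES jittered rays per pixel through the pinhole
// camera (M_c_w, focal_length), traces each ray NO_SAMPLES times to average the stochastic
// ambient term, and writes the averaged color to output_buffer as 8-bit RGBA.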
RT_PROGRAM void primary_ray( void )
{
PerRayData_radiance prd;
curandState_t state;
prd.state = &state;
curand_init(launch_index.x + launch_dim.x * launch_index.y, 0, 0, prd.state);
int ANTI_ALIASING_SAMPLES = 8;
int NO_SAMPLES = 8;
optix::float3 resultColor = optix::make_float3(0.0f, 0.0f, 0.0f);
for (int i = 0; i < ANTI_ALIASING_SAMPLES; i++)
{
float randomX = curand_uniform(prd.state);
float randomY = curand_uniform(prd.state);
const optix::float3 d_c = make_float3(launch_index.x - launch_dim.x * 0.5f + randomX,
output_buffer.size().y * 0.5f - launch_index.y + randomY,
-focal_length);
const optix::float3 d_w = optix::normalize(M_c_w * d_c);
optix::Ray ray(view_from, d_w, 0, 0.01f);
optix::float3 ambientColor = optix::make_float3(0.0f, 0.0f, 0.0f);
for (int j = 0; j < NO_SAMPLES; j++) {
rtTrace(top_object, ray, prd);
ambientColor += prd.result;
}
ambientColor /= NO_SAMPLES;
resultColor += ambientColor;
}
resultColor /= ANTI_ALIASING_SAMPLES;
output_buffer[launch_index] = optix::make_uchar4(resultColor.x*255.0f, resultColor.y*255.0f, resultColor.z*255.0f, 255 );
}
RT_PROGRAM void closest_hit_normal_shader( void )
{
optix::float3 normal = hitInfo.normal;
ray_data.result = optix::make_float3((normal.x + 1) / 2, (normal.y + 1) / 2, (normal.z + 1) / 2);
}
RT_PROGRAM void closest_hit_lambert_shader(void)
{
float normalLigthScalarProduct = optix::dot(hitInfo.vectorToLight, hitInfo.normal);
ray_data.result = getDiffuseColor() * normalLigthScalarProduct * getAmbientColor();
}
RT_PROGRAM void closest_hit_phong_shader(void)
{
float normalLigthScalarProduct = optix::dot(hitInfo.vectorToLight, hitInfo.normal);
optix::float3 lr = 2 * (normalLigthScalarProduct)* hitInfo.normal - hitInfo.vectorToLight;
//optix::float3 reflectedColor = reflect();
ray_data.result = ambient + (getDiffuseColor() * normalLigthScalarProduct) + specular * pow(/*reflectedColor.x **/ optix::clamp(optix::dot(-ray.direction, lr), 0.0f, 1.0f), shininess);
ray_data.result = ray_data.result * getAmbientColor();
}
RT_PROGRAM void closest_hit_glass_shader(void)
{
}
RT_PROGRAM void closest_hit_pbr_shader(void)
{
}
RT_PROGRAM void closest_hit_mirror_shader(void)
{
}
RT_PROGRAM void any_hit(void)
{
shadow_ray_data.visible.x = 0;
rtTerminateRay();
}
RT_PROGRAM void miss_program( void )
{
ray_data.result = optix::make_float3( 0.0f, 0.0f, 0.0f );
}
RT_PROGRAM void exception( void )
{
const unsigned int code = rtGetExceptionCode();
rtPrintf( "Exception 0x%X at (%d, %d)\n", code, launch_index.x, launch_index.y );
rtPrintExceptionDetails();
output_buffer[launch_index] = uchar4{ 255, 0, 255, 0 };
}
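// Cosine-weighted hemisphere sampling around `normal`: the local-space point
// (x, y, z) below has its polar angle distributed proportionally to cos(theta),
// and is then rotated into world space with an orthonormal basis built from the
// normal. The matching density is pdf = cos(theta) / pi, which the caller divides
// out in its Monte Carlo estimator.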
__device__ optix::float3 sampleHemisphere(optix::float3 normal, curandState_t* state, float& pdf) {
float randomU = curand_uniform(state);
float randomV = curand_uniform(state);
float x = cosf(2 * CUDART_PI_F * randomU) * sqrtf(1 - randomV);
float y = sinf(2 * CUDART_PI_F * randomU) * sqrtf(1 - randomV);
float z = sqrtf(randomV);
optix::float3 O1 = optix::normalize(orthogonal(normal));
optix::float3 O2 = optix::normalize(optix::cross(normal, O1));
optix::Matrix3x3 transformationMatrix = optix::make_matrix3x3(optix::Matrix<4, 4>::fromBasis(O1, O2, normal, optix::make_float3(0.0f, 0.0f, 0.0f) ));
optix::float3 omegai = optix::make_float3(x, y, z);
omegai = optix::normalize(transformationMatrix * omegai);
pdf = optix::dot(normal, omegai) / CUDART_PI_F;
return omegai;
}
__device__ optix::float3 orthogonal(const optix::float3 & v)
{
return (abs(v.x) > abs(v.z)) ? optix::make_float3(-v.y, v.x, 0.0f) : optix::make_float3(0.0f, -v.z, v.y);
}
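// Single-sample Monte Carlo estimate of the ambient term: draw one cosine-weighted
// direction above the surface, trace a shadow-type ray along it and weight a white
// environment by cos(theta) / (pi * pdf). With the cosine-weighted pdf this reduces
// to plain visibility, so averaging several calls approximates ambient occlusion.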
__device__ optix::float3 getAmbientColor()
{
float pdf = 0;
optix::float3 omegai = sampleHemisphere(hitInfo.normal, ray_data.state, pdf);
optix::Ray ray(hitInfo.intersectionPoint, omegai, 1, 0.01f);
PerRayData_shadow shadow_ray;
shadow_ray.visible.x = 1;
rtTrace(top_object, ray, shadow_ray);
optix::float3 whiteColor = optix::make_float3(1, 1, 1);
return whiteColor * optix::dot(hitInfo.normal, omegai) * shadow_ray.visible.x / CUDART_PI_F / pdf;
}
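// Diffuse albedo lookup: sample the bound diffuse texture when one is attached
// (the V coordinate is flipped to match the image origin), otherwise fall back to
// the material's constant diffuse colour.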
__device__ optix::float3 getDiffuseColor()
{
optix::float3 color;
if (tex_diffuse_id != -1) {
const optix::float4 value = optix::rtTex2D<optix::float4>(tex_diffuse_id, hitInfo.texcoord.x, 1 - hitInfo.texcoord.y);
color = optix::make_float3(value.x, value.y, value.z);
}
else {
color = diffuse;
}
return color;
}
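// Traces a single mirror-reflected ray from the hit point and returns the radiance
// it gathers; the reflected direction is the incoming direction mirrored about the
// shading normal, r = d - 2 (d . n) n.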
__device__ optix::float3 reflect()
{
PerRayData_radiance prd;
prd.state = ray_data.state;
// mirror the incoming view direction about the shading normal: r = d - 2 (d . n) n
optix::float3 reflected = ray.direction - 2 * optix::dot(ray.direction, hitInfo.normal) * hitInfo.normal;
optix::Ray reflectedRay(hitInfo.intersectionPoint, reflected, 0, 0.01f);
rtTrace(top_object, reflectedRay, prd);
return prd.result;
}
|
3c453453234a7def684c4412f3bc6cbae97011fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // total number of data elements
#define NF_SIZE 64 // size of Nf
#define BLOCK_SIZE 64 // CUDA kernel thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION 1 // number of times the experiment is repeated
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU;
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes, for each index, the sum of the array elements from index - Nf to index + Nf
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
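// Note: the launch below uses a 1-D grid of 1-D blocks, so row is always 0 and the
// general 2-D index expression collapses to tid = blockDim.x * blockIdx.x + threadIdx.x;
// each thread produces exactly one element of d_SumOfArrayElements.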
__global__ void Sum_n_elements_Kernel(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
/*Todo*/
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int tid = gridDim.x * blockDim.x * row + col;
int j;
int start, end;
int sum;
start = tid - Nf;
end = tid + Nf;
if (start < 0)//if start is below 0, clamp it to 0 so the sum begins at index 0
start = 0;
if (end >= N)//if end is N or more, clamp it to N - 1 (valid p_ArrayElement indices: 0~N-1)
end = N - 1;
sum = 0;
for (j = start; j <= end; j++)
sum += d_ArrayElements[j];//accumulate the sum from tid - Nf to tid + Nf
d_SumOfArrayElements[tid] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// C code that computes, for each index, the sum of the array elements from index - Nf to index + Nf
// Used as reference data to check whether the GPU kernel produced the correct result
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
/*Todo*/
int i, j, k;
int start, end;
int sum;
for (i = 0; i < N; i++)
{
start=i-Nf;
end=i+Nf;
if (start < 0)//if start is below 0, clamp it to 0 so the sum begins at index 0
start = 0;
if (end >= N)//if end is N or more, clamp it to N - 1 (valid p_ArrayElement indices: 0~N-1)
end = N - 1;
sum = 0;
for (j = start; j <= end; j++)
sum += p_ArrayElements[j];//accumulate the sum from i - Nf to i + Nf
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reads the given bin file
// The first 4 bytes hold the total number of elements, the next 4 bytes hold the size of Nf, followed by N int values
// Each value is an integer in the range -100 ~ 100
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("Cuda_HW3_input.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("Cuda_HW3_input.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU, Nf);
GPU_time_NO_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU[i]) {
printf("%d : CPU : %d,\tGPU : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("\n***Binary File Write Start!!\n");
FILE* fp = fopen("Cuda_HW3_output.bin", "wb");
if (!fp) {
fprintf(stderr, "Error: cannot open the output file...\n");
exit(-1);
}
fwrite(&N, sizeof(int), 1, fp);
fwrite(&Nf, sizeof(int), 1, fp);
for (i = 0; i < N; i++)
fwrite(&h_SumOfArrayElements_GPU[i], sizeof(int), 1, fp);
fclose(fp);
printf("***Binary File Write End!!\n");
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the data needed before launching the kernel and selects the device the kernel will run on
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf) {
CHECK_TIME_INIT_GPU();
hipError_t cudaStatus;
/*Todo*/
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}// this if (cudaStatus != hipSuccess) check plays the same role as the CUDA_CALL macro
int* d_ArrayElements;
int* d_SumOfElements_GPU;
size_t size = N * sizeof(int);
CUDA_CALL(hipMalloc(&d_ArrayElements, size))//allocate memory on the GPU to store p_ArrayElements.
CUDA_CALL(hipMemcpy(d_ArrayElements, p_ArrayElements, size, hipMemcpyHostToDevice))//copy p_ArrayElements to the GPU buffer (d_ArrayElements).
CUDA_CALL(hipMalloc(&d_SumOfElements_GPU, size))//allocate GPU memory where the result will be stored.
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE);//block dimension is 1-D, of size BLOCK_SIZE
dim3 dimGrid(N / BLOCK_SIZE);//N / BLOCK_SIZE is the grid dimension
CHECK_TIME_START_GPU()
Sum_n_elements_Kernel << < dimGrid, dimBlock >> > (d_ArrayElements, d_SumOfElements_GPU, N, Nf);//launch the kernel
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(p_SumOfElements_GPU, d_SumOfElements_GPU, size, hipMemcpyDeviceToHost))//copy the computed result back from GPU memory
Error:
hipFree(d_ArrayElements);
hipFree(d_SumOfElements_GPU);
CHECK_TIME_DEST_GPU();
return cudaStatus;
}
|
3c453453234a7def684c4412f3bc6cbae97011fe.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // total number of data elements
#define NF_SIZE 64 // size of Nf
#define BLOCK_SIZE 64 // CUDA kernel thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION 1 // number of times the experiment is repeated
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU;
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel code that computes, for each index, the sum of the array elements from index - Nf to index + Nf
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
/*Todo*/
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int tid = gridDim.x * blockDim.x * row + col;
int j;
int start, end;
int sum;
start = tid - Nf;
end = tid + Nf;
if (start < 0)//if start is below 0, clamp it to 0 so the sum begins at index 0
start = 0;
if (end >= N)//if end is N or more, clamp it to N - 1 (valid p_ArrayElement indices: 0~N-1)
end = N - 1;
sum = 0;
for (j = start; j <= end; j++)
sum += d_ArrayElements[j];//accumulate the sum from tid - Nf to tid + Nf
d_SumOfArrayElements[tid] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// C code that computes, for each index, the sum of the array elements from index - Nf to index + Nf
// Used as reference data to check whether the GPU kernel produced the correct result
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
/*Todo*/
int i, j, k;
int start, end;
int sum;
for (i = 0; i < N; i++)
{
start=i-Nf;
end=i+Nf;
if (start < 0)//if start is below 0, clamp it to 0 so the sum begins at index 0
start = 0;
if (end >= N)//if end is N or more, clamp it to N - 1 (valid p_ArrayElement indices: 0~N-1)
end = N - 1;
sum = 0;
for (j = start; j <= end; j++)
sum += p_ArrayElements[j];//accumulate the sum from i - Nf to i + Nf
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reads the given bin file
// The first 4 bytes hold the total number of elements, the next 4 bytes hold the size of Nf, followed by N int values
// Each value is an integer in the range -100 ~ 100
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("Cuda_HW3_input.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("Cuda_HW3_input.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU, Nf);
GPU_time_NO_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU[i]) {
printf("%d : CPU : %d,\tGPU : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("\n***Binary File Write Start!!\n");
FILE* fp = fopen("Cuda_HW3_output.bin", "wb");
if (!fp) {
fprintf(stderr, "Error: cannot open the output file...\n");
exit(-1);
}
fwrite(&N, sizeof(int), 1, fp);
fwrite(&Nf, sizeof(int), 1, fp);
for (i = 0; i < N; i++)
fwrite(&h_SumOfArrayElements_GPU[i], sizeof(int), 1, fp);
fclose(fp);
printf("***Binary File Write End!!\n");
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the data needed before launching the kernel and selects the device the kernel will run on
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf) {
CHECK_TIME_INIT_GPU();
cudaError_t cudaStatus;
/*Todo*/
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}// this if (cudaStatus != cudaSuccess) check plays the same role as the CUDA_CALL macro
int* d_ArrayElements;
int* d_SumOfElements_GPU;
size_t size = N * sizeof(int);
CUDA_CALL(cudaMalloc(&d_ArrayElements, size))//allocate memory on the GPU to store p_ArrayElements.
CUDA_CALL(cudaMemcpy(d_ArrayElements, p_ArrayElements, size, cudaMemcpyHostToDevice))//copy p_ArrayElements to the GPU buffer (d_ArrayElements).
CUDA_CALL(cudaMalloc(&d_SumOfElements_GPU, size))//allocate GPU memory where the result will be stored.
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE);//block dimension is 1-D, of size BLOCK_SIZE
dim3 dimGrid(N / BLOCK_SIZE);//N / BLOCK_SIZE is the grid dimension
CHECK_TIME_START_GPU()
Sum_n_elements_Kernel << < dimGrid, dimBlock >> > (d_ArrayElements, d_SumOfElements_GPU, N, Nf);//launch the kernel
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(p_SumOfElements_GPU, d_SumOfElements_GPU, size, cudaMemcpyDeviceToHost))//copy the computed result back from GPU memory
Error:
cudaFree(d_ArrayElements);
cudaFree(d_SumOfElements_GPU);
CHECK_TIME_DEST_GPU();
return cudaStatus;
}
|
c7020783c4cd70dc676bbf8b0048c799ca8ac29a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <cuml/random_projection/rproj_c.h>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/distance/distance.hpp>
#include <raft/linalg/transpose.hpp>
#include <random>
#include <test_utils.h>
#include <vector>
namespace ML {
template <typename T, int N, int M>
class RPROJTest : public ::testing::Test {
public:
RPROJTest()
: stream(handle.get_stream()),
random_matrix1(stream),
random_matrix2(stream),
d_input(0, stream),
d_output1(0, stream),
d_output2(0, stream)
{
}
protected:
void generate_data()
{
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_real_distribution<T> dist(0, 1);
h_input.resize(N * M);
for (auto& i : h_input) {
i = dist(rng);
}
d_input.resize(h_input.size(), stream);
raft::update_device(d_input.data(), h_input.data(), h_input.size(), stream);
// transpose(d_input, d_input, N, M);
// From row major to column major (this operation is only useful for non-random datasets)
}
void gaussianTest()
{
params1 = {
N, // number of samples
M, // number of features
-1, // number of components
epsilon, // error tolerance
true, // gaussian or sparse method
-1.0, // auto density
false, // not used
42 // random seed
};
RPROJfit(handle, &random_matrix1, ¶ms1);
d_output1.resize(N * params1.n_components, stream);
rmm::device_uvector<T> tmp(d_output1.size(), stream);
RPROJtransform(handle, d_input.data(), &random_matrix1, tmp.data(), ¶ms1);
raft::linalg::transpose(handle,
tmp.data(),
d_output1.data(),
N,
params1.n_components,
stream); // From column major to row major
handle.sync_stream(stream);
}
void sparseTest()
{
params2 = {
N, // number of samples
M, // number of features
-1, // number of components (-1: auto-deduction)
epsilon, // error tolerance
false, // gaussian or sparse method
-1.0, // auto density (-1: auto-deduction)
false, // not used
42 // random seed
};
RPROJfit(handle, &random_matrix2, ¶ms2);
d_output2.resize(N * params2.n_components, stream);
rmm::device_uvector<T> tmp(d_output2.size(), stream);
RPROJtransform(handle, d_input.data(), &random_matrix2, tmp.data(), ¶ms2);
raft::linalg::transpose(handle,
tmp.data(),
d_output2.data(),
N,
params2.n_components,
stream); // From column major to row major
handle.sync_stream(stream);
}
void SetUp() override
{
epsilon = 0.2;
generate_data();
gaussianTest();
sparseTest();
}
void random_matrix_check()
{
int D = johnson_lindenstrauss_min_dim(N, epsilon);
ASSERT_TRUE(params1.n_components == D);
ASSERT_TRUE(random_matrix1.dense_data.size() > 0);
ASSERT_TRUE(random_matrix1.type == dense);
ASSERT_TRUE(params2.n_components == D);
ASSERT_TRUE(params2.density == 1 / sqrt(M));
ASSERT_TRUE(random_matrix2.indices.size() > 0);
ASSERT_TRUE(random_matrix2.indptr.size() > 0);
ASSERT_TRUE(random_matrix2.sparse_data.size() > 0);
ASSERT_TRUE(random_matrix2.type == sparse);
}
void epsilon_check()
{
int D = johnson_lindenstrauss_min_dim(N, epsilon);
constexpr auto distance_type = raft::distance::DistanceType::L2SqrtUnexpanded;
rmm::device_uvector<T> d_pdist(N * N, stream);
ML::Metrics::pairwise_distance(
handle, d_input.data(), d_input.data(), d_pdist.data(), N, N, M, distance_type);
RAFT_CUDA_TRY(hipPeekAtLastError());
T* h_pdist = new T[N * N];
raft::update_host(h_pdist, d_pdist.data(), N * N, stream);
rmm::device_uvector<T> d_pdist1(N * N, stream);
ML::Metrics::pairwise_distance(
handle, d_output1.data(), d_output1.data(), d_pdist1.data(), N, N, D, distance_type);
RAFT_CUDA_TRY(hipPeekAtLastError());
T* h_pdist1 = new T[N * N];
raft::update_host(h_pdist1, d_pdist1.data(), N * N, stream);
rmm::device_uvector<T> d_pdist2(N * N, stream);
ML::Metrics::pairwise_distance(
handle, d_output2.data(), d_output2.data(), d_pdist2.data(), N, N, D, distance_type);
RAFT_CUDA_TRY(hipPeekAtLastError());
T* h_pdist2 = new T[N * N];
raft::update_host(h_pdist2, d_pdist2.data(), N * N, stream);
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j <= i; j++) {
T pdist = h_pdist[i * N + j];
T pdist1 = h_pdist1[i * N + j];
T pdist2 = h_pdist2[i * N + j];
T lower_bound = (1.0 - epsilon) * pdist;
T upper_bound = (1.0 + epsilon) * pdist;
ASSERT_TRUE(lower_bound <= pdist1 && pdist1 <= upper_bound);
ASSERT_TRUE(lower_bound <= pdist2 && pdist2 <= upper_bound);
}
}
delete[] h_pdist;
delete[] h_pdist1;
delete[] h_pdist2;
}
protected:
raft::handle_t handle;
hipStream_t stream = 0;
T epsilon;
std::vector<T> h_input;
rmm::device_uvector<T> d_input;
paramsRPROJ params1;
rand_mat<T> random_matrix1;
rmm::device_uvector<T> d_output1;
paramsRPROJ params2;
rand_mat<T> random_matrix2;
rmm::device_uvector<T> d_output2;
};
typedef RPROJTest<float, 500, 2000> RPROJTestF1;
TEST_F(RPROJTestF1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 500, 2000> RPROJTestD1;
TEST_F(RPROJTestD1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<float, 5000, 3500> RPROJTestF2;
TEST_F(RPROJTestF2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF2, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 5000, 3500> RPROJTestD2;
TEST_F(RPROJTestD2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD2, EpsilonCheck) { epsilon_check(); }
} // end namespace ML
|
c7020783c4cd70dc676bbf8b0048c799ca8ac29a.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <cuml/random_projection/rproj_c.h>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/distance/distance.hpp>
#include <raft/linalg/transpose.hpp>
#include <random>
#include <test_utils.h>
#include <vector>
namespace ML {
template <typename T, int N, int M>
class RPROJTest : public ::testing::Test {
public:
RPROJTest()
: stream(handle.get_stream()),
random_matrix1(stream),
random_matrix2(stream),
d_input(0, stream),
d_output1(0, stream),
d_output2(0, stream)
{
}
protected:
void generate_data()
{
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_real_distribution<T> dist(0, 1);
h_input.resize(N * M);
for (auto& i : h_input) {
i = dist(rng);
}
d_input.resize(h_input.size(), stream);
raft::update_device(d_input.data(), h_input.data(), h_input.size(), stream);
// transpose(d_input, d_input, N, M);
// From row major to column major (this operation is only useful for non-random datasets)
}
void gaussianTest()
{
params1 = {
N, // number of samples
M, // number of features
-1, // number of components
epsilon, // error tolerance
true, // gaussian or sparse method
-1.0, // auto density
false, // not used
42 // random seed
};
RPROJfit(handle, &random_matrix1, ¶ms1);
d_output1.resize(N * params1.n_components, stream);
rmm::device_uvector<T> tmp(d_output1.size(), stream);
RPROJtransform(handle, d_input.data(), &random_matrix1, tmp.data(), ¶ms1);
raft::linalg::transpose(handle,
tmp.data(),
d_output1.data(),
N,
params1.n_components,
stream); // From column major to row major
handle.sync_stream(stream);
}
void sparseTest()
{
params2 = {
N, // number of samples
M, // number of features
-1, // number of components (-1: auto-deduction)
epsilon, // error tolerance
false, // gaussian or sparse method
-1.0, // auto density (-1: auto-deduction)
false, // not used
42 // random seed
};
RPROJfit(handle, &random_matrix2, ¶ms2);
d_output2.resize(N * params2.n_components, stream);
rmm::device_uvector<T> tmp(d_output2.size(), stream);
RPROJtransform(handle, d_input.data(), &random_matrix2, tmp.data(), ¶ms2);
raft::linalg::transpose(handle,
tmp.data(),
d_output2.data(),
N,
params2.n_components,
stream); // From column major to row major
handle.sync_stream(stream);
}
void SetUp() override
{
epsilon = 0.2;
generate_data();
gaussianTest();
sparseTest();
}
void random_matrix_check()
{
int D = johnson_lindenstrauss_min_dim(N, epsilon);
ASSERT_TRUE(params1.n_components == D);
ASSERT_TRUE(random_matrix1.dense_data.size() > 0);
ASSERT_TRUE(random_matrix1.type == dense);
ASSERT_TRUE(params2.n_components == D);
ASSERT_TRUE(params2.density == 1 / sqrt(M));
ASSERT_TRUE(random_matrix2.indices.size() > 0);
ASSERT_TRUE(random_matrix2.indptr.size() > 0);
ASSERT_TRUE(random_matrix2.sparse_data.size() > 0);
ASSERT_TRUE(random_matrix2.type == sparse);
}
void epsilon_check()
{
int D = johnson_lindenstrauss_min_dim(N, epsilon);
constexpr auto distance_type = raft::distance::DistanceType::L2SqrtUnexpanded;
rmm::device_uvector<T> d_pdist(N * N, stream);
ML::Metrics::pairwise_distance(
handle, d_input.data(), d_input.data(), d_pdist.data(), N, N, M, distance_type);
RAFT_CUDA_TRY(cudaPeekAtLastError());
T* h_pdist = new T[N * N];
raft::update_host(h_pdist, d_pdist.data(), N * N, stream);
rmm::device_uvector<T> d_pdist1(N * N, stream);
ML::Metrics::pairwise_distance(
handle, d_output1.data(), d_output1.data(), d_pdist1.data(), N, N, D, distance_type);
RAFT_CUDA_TRY(cudaPeekAtLastError());
T* h_pdist1 = new T[N * N];
raft::update_host(h_pdist1, d_pdist1.data(), N * N, stream);
rmm::device_uvector<T> d_pdist2(N * N, stream);
ML::Metrics::pairwise_distance(
handle, d_output2.data(), d_output2.data(), d_pdist2.data(), N, N, D, distance_type);
RAFT_CUDA_TRY(cudaPeekAtLastError());
T* h_pdist2 = new T[N * N];
raft::update_host(h_pdist2, d_pdist2.data(), N * N, stream);
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j <= i; j++) {
T pdist = h_pdist[i * N + j];
T pdist1 = h_pdist1[i * N + j];
T pdist2 = h_pdist2[i * N + j];
T lower_bound = (1.0 - epsilon) * pdist;
T upper_bound = (1.0 + epsilon) * pdist;
ASSERT_TRUE(lower_bound <= pdist1 && pdist1 <= upper_bound);
ASSERT_TRUE(lower_bound <= pdist2 && pdist2 <= upper_bound);
}
}
delete[] h_pdist;
delete[] h_pdist1;
delete[] h_pdist2;
}
protected:
raft::handle_t handle;
cudaStream_t stream = 0;
T epsilon;
std::vector<T> h_input;
rmm::device_uvector<T> d_input;
paramsRPROJ params1;
rand_mat<T> random_matrix1;
rmm::device_uvector<T> d_output1;
paramsRPROJ params2;
rand_mat<T> random_matrix2;
rmm::device_uvector<T> d_output2;
};
typedef RPROJTest<float, 500, 2000> RPROJTestF1;
TEST_F(RPROJTestF1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 500, 2000> RPROJTestD1;
TEST_F(RPROJTestD1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<float, 5000, 3500> RPROJTestF2;
TEST_F(RPROJTestF2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF2, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 5000, 3500> RPROJTestD2;
TEST_F(RPROJTestD2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD2, EpsilonCheck) { epsilon_check(); }
} // end namespace ML
|
79e807fd3043f32e0ccb48cedaa1cc42370fd5ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
//#include <hip/hip_runtime.h>
#include <omp.h>
#include "config.h"
#include "cuda-util.h"
#include "mtgp-1.1/mtgp32-cuda.h"
#include "billionga.h"
//Must be divisible by 32 (8 and 4)... and by 512, 128???
//#define TEST_PROBLEM_SIZE 32
//#define TEST_PROBLEM_SIZE 128
//#define TEST_PROBLEM_SIZE 524288
//#define TEST_PROBLEM_SIZE 1048576
//#define TEST_PROBLEM_SIZE 2097152
//#define TEST_PROBLEM_SIZE 899999744
struct termination_criteria {
int max_iteration_count;
};
inline void termination_criteria_init(struct termination_criteria *term_state,
int max_iteration_count) {
term_state->max_iteration_count = max_iteration_count;
}
inline int termination_criteria_eval(struct termination_criteria *term_state,
struct bga_state *problem_state, int iteration_count) {
return (iteration_count == term_state->max_iteration_count);
}
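// Overall flow: each OpenMP thread drives one GPU. Every iteration the compact GA
// samples candidate solutions from the shared probability vector using the device
// Mersenne Twister, computes partial and then full sample fitness, updates the
// probability model, and periodically reports the accumulated probability.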
int main(int argc, char **argv) {
if (argc != 5) {
fprintf(stdout, "Wrong! RFM!\n\nUsage: %s <problem size> <max iteration> <prng vector size> <gpu device>\n(where 1 <= problem size <= %ld and problem_size can be divided by 8)\n\n", argv[0], LONG_MAX);
return EXIT_FAILURE;
}
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Starting... ===============================\n");
#endif
long problem_size;
problem_size = atol(argv[1]);
int max_iteration_count = atoi(argv[2]);
struct termination_criteria term_state;
termination_criteria_init(&term_state, max_iteration_count);
// === GPU.
int number_gpus = 0;
hipGetDeviceCount(&number_gpus);
if(number_gpus < 1)
{
fprintf(stderr, "[ERROR] No CUDA capable devices were detected.\n");
exit(EXIT_FAILURE);
}
int starting_gpu_device = atoi(argv[4]);
assert(starting_gpu_device >= 0 && starting_gpu_device < number_gpus);
// === PRNG.
int prng_vector_size = atoi(argv[3]);
unsigned int prng_seeds[4] = {3822712292, 495793398, 4202624243, 3503457871}; // generated with: od -vAn -N4 -tu4 < /dev/urandom
// === OpenMP
omp_set_num_threads(2);
int nthreads = omp_get_num_threads();
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] Number of threads %d.\n", nthreads);
#endif
assert(nthreads < 4);
// === Initialization of the cGA
struct bga_state problem_state;
bga_initialization(&problem_state, problem_size, nthreads, NUMBER_OF_SAMPLES);
int current_iteration = 0;
#pragma omp parallel // private(th_id)
{
int th_id = omp_get_thread_num();
int th_device = (starting_gpu_device + th_id) % number_gpus;
ccudaSetDevice(th_device);
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] Thread %d using device %d.\n", th_id, th_device);
#endif
// === Initialization of the Mersenne Twister.
mtgp32_status mt_status;
mtgp32_initialize(&mt_status, prng_vector_size, prng_seeds[th_id]);
// === Initialization of the BillionGA.
bga_initialize_thread(&problem_state, th_id);
#if defined(DEBUG)
#pragma omp barrier
if (th_id == 0) bga_show_prob_vector_state(&problem_state);
#pragma omp barrier
#endif
while (!termination_criteria_eval(&term_state, &problem_state, current_iteration)) {
#pragma omp barrier
if (th_id == 0) {
current_iteration++;
#if defined(DEBUG)
fprintf(stdout, "*** ITERACION %d *********************************************\n", current_iteration);
#endif
}
#pragma omp barrier
bga_model_sampling_mt(&problem_state, &mt_status, th_id);
#if defined(DEBUG)
#pragma omp barrier
#endif
bga_compute_sample_part_fitness(&problem_state, th_id);
#pragma omp barrier
if (th_id == 0) {
bga_compute_sample_full_fitness(&problem_state);
}
#pragma omp barrier
bga_model_update(&problem_state, th_id);
//#if defined(DEBUG)
//if (th_id == 0) bga_show_prob_vector_state(&problem_state);
//#endif
if (termination_criteria_eval(&term_state, &problem_state, current_iteration)) {
bga_get_part_accumulated_prob(&problem_state, th_id);
}
#if !defined(DEBUG) && defined(INFO)
if (!(termination_criteria_eval(&term_state, &problem_state, current_iteration))) {
bga_get_part_accumulated_prob(&problem_state, th_id);
#pragma omp barrier
if (th_id == 0) {
if (current_iteration % 1 == 0) {
fprintf(stdout, "=== ITERACION %d ===============\n", current_iteration);
fprintf(stdout, "Accumulated probability: %.4f\n", bga_get_full_accumulated_prob(&problem_state));
}
}
#pragma omp barrier
}
#endif
}
#pragma omp barrier
if (th_id == 0) fprintf(stdout, "\n\n[FINAL] Accumulated probability: %.4f\n", bga_get_full_accumulated_prob(&problem_state));
// === Free the Mersenne Twister memory.
mtgp32_free(&mt_status);
}
// === Free the cGA memory.
bga_free(&problem_state);
return EXIT_SUCCESS;
}
|
79e807fd3043f32e0ccb48cedaa1cc42370fd5ce.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
//#include <cuda.h>
#include <omp.h>
#include "config.h"
#include "cuda-util.h"
#include "mtgp-1.1/mtgp32-cuda.h"
#include "billionga.h"
//Must be divisible by 32 (8 and 4)... and by 512, 128???
//#define TEST_PROBLEM_SIZE 32
//#define TEST_PROBLEM_SIZE 128
//#define TEST_PROBLEM_SIZE 524288
//#define TEST_PROBLEM_SIZE 1048576
//#define TEST_PROBLEM_SIZE 2097152
//#define TEST_PROBLEM_SIZE 899999744
struct termination_criteria {
int max_iteration_count;
};
inline void termination_criteria_init(struct termination_criteria *term_state,
int max_iteration_count) {
term_state->max_iteration_count = max_iteration_count;
}
inline int termination_criteria_eval(struct termination_criteria *term_state,
struct bga_state *problem_state, int iteration_count) {
return (iteration_count == term_state->max_iteration_count);
}
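// Overall flow: each OpenMP thread drives one GPU. Every iteration the compact GA
// samples candidate solutions from the shared probability vector using the device
// Mersenne Twister, computes partial and then full sample fitness, updates the
// probability model, and periodically reports the accumulated probability.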
int main(int argc, char **argv) {
if (argc != 5) {
fprintf(stdout, "Wrong! RFM!\n\nUsage: %s <problem size> <max iteration> <prng vector size> <gpu device>\n(where 1 <= problem size <= %ld and problem_size can be divided by 8)\n\n", argv[0], LONG_MAX);
return EXIT_FAILURE;
}
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Starting... ===============================\n");
#endif
long problem_size;
problem_size = atol(argv[1]);
int max_iteration_count = atoi(argv[2]);
struct termination_criteria term_state;
termination_criteria_init(&term_state, max_iteration_count);
// === GPU.
int number_gpus = 0;
cudaGetDeviceCount(&number_gpus);
if(number_gpus < 1)
{
fprintf(stderr, "[ERROR] No CUDA capable devices were detected.\n");
exit(EXIT_FAILURE);
}
int starting_gpu_device = atoi(argv[4]);
assert(starting_gpu_device >= 0 && starting_gpu_device < number_gpus);
// === PRNG.
int prng_vector_size = atoi(argv[3]);
unsigned int prng_seeds[4] = {3822712292, 495793398, 4202624243, 3503457871}; // generated with: od -vAn -N4 -tu4 < /dev/urandom
// === OpenMP
omp_set_num_threads(2);
int nthreads = omp_get_num_threads();
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] Number of threads %d.\n", nthreads);
#endif
assert(nthreads < 4);
// === Initialization of the cGA
struct bga_state problem_state;
bga_initialization(&problem_state, problem_size, nthreads, NUMBER_OF_SAMPLES);
int current_iteration = 0;
#pragma omp parallel // private(th_id)
{
int th_id = omp_get_thread_num();
int th_device = (starting_gpu_device + th_id) % number_gpus;
ccudaSetDevice(th_device);
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] Thread %d using device %d.\n", th_id, th_device);
#endif
// === Initialization of the Mersenne Twister.
mtgp32_status mt_status;
mtgp32_initialize(&mt_status, prng_vector_size, prng_seeds[th_id]);
// === Initialization of the BillionGA.
bga_initialize_thread(&problem_state, th_id);
#if defined(DEBUG)
#pragma omp barrier
if (th_id == 0) bga_show_prob_vector_state(&problem_state);
#pragma omp barrier
#endif
while (!termination_criteria_eval(&term_state, &problem_state, current_iteration)) {
#pragma omp barrier
if (th_id == 0) {
current_iteration++;
#if defined(DEBUG)
fprintf(stdout, "*** ITERACION %d *********************************************\n", current_iteration);
#endif
}
#pragma omp barrier
bga_model_sampling_mt(&problem_state, &mt_status, th_id);
#if defined(DEBUG)
#pragma omp barrier
#endif
bga_compute_sample_part_fitness(&problem_state, th_id);
#pragma omp barrier
if (th_id == 0) {
bga_compute_sample_full_fitness(&problem_state);
}
#pragma omp barrier
bga_model_update(&problem_state, th_id);
//#if defined(DEBUG)
//if (th_id == 0) bga_show_prob_vector_state(&problem_state);
//#endif
if (termination_criteria_eval(&term_state, &problem_state, current_iteration)) {
bga_get_part_accumulated_prob(&problem_state, th_id);
}
#if !defined(DEBUG) && defined(INFO)
if (!(termination_criteria_eval(&term_state, &problem_state, current_iteration))) {
bga_get_part_accumulated_prob(&problem_state, th_id);
#pragma omp barrier
if (th_id == 0) {
if (current_iteration % 1 == 0) {
fprintf(stdout, "=== ITERACION %d ===============\n", current_iteration);
fprintf(stdout, "Accumulated probability: %.4f\n", bga_get_full_accumulated_prob(&problem_state));
}
}
#pragma omp barrier
}
#endif
}
#pragma omp barrier
if (th_id == 0) fprintf(stdout, "\n\n[FINAL] Accumulated probability: %.4f\n", bga_get_full_accumulated_prob(&problem_state));
// === Free the Mersenne Twister memory.
mtgp32_free(&mt_status);
}
// === Free the cGA memory.
bga_free(&problem_state);
return EXIT_SUCCESS;
}
|
fe704650e27af51848a6324b7c1ee9268fe1a26e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <hip/hip_runtime.h>
#include "RealMatrix.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ RealMatrix::RealMatrix( double matrix[], int rows, int cols ) {
set( matrix, rows, cols ) ;
}
__device__ RealMatrix::RealMatrix(
const double rc00, const double rc01, const double rc02,
const double rc10, const double rc11, const double rc12,
const double rc20, const double rc21, const double rc22 ) {
this->rows = 3 ;
this->cols = 3 ;
this->matrix = new double[9] {
rc00, rc01, rc02,
rc10, rc11, rc12,
rc20, rc21, rc22 } ;
}
__device__ RealMatrix::RealMatrix(
const double rc00, const double rc01, const double rc02, const double rc03,
const double rc10, const double rc11, const double rc12, const double rc13,
const double rc20, const double rc21, const double rc22, const double rc23,
const double rc30, const double rc31, const double rc32, const double rc33 ) {
this->rows = 4 ;
this->cols = 4 ;
this->matrix = new double[16] {
rc00, rc01, rc02, rc03,
rc10, rc11, rc12, rc13,
rc20, rc21, rc22, rc23,
rc30, rc31, rc32, rc33 } ;
}
__device__ RealMatrix::~RealMatrix() {
delete[] matrix ;
}
__device__ void RealMatrix::set( double matrix[], int rows, int cols ) {
int size = rows*cols ;
this->rows = rows ;
this->cols = cols ;
this->matrix = new double[size] ;
for ( int i=0 ; size>i ; i++ )
this->matrix[i] = matrix[i] ;
}
__device__ double* RealMatrix::operate( double vector[] ) {
double sum, *retval = new double[rows] ;
for ( int r=0 ; rows>r ; r++ ) {
sum = 0 ;
for ( int c=0 ; cols>c ; c++ )
sum += matrix[rows*r+c]*vector[c] ;
retval[r] = sum ;
}
return retval ;
}
#ifdef REALMATRIX_MAIN
// kernel
__global__ void realmatrix( double* buf ) {
double dat[] = {
10, 12, 12, 14,
21, 22, 23, 25,
32, 32, 34, 36,
43, 42, 45, 47
}, vec[4], *res ;
RealMatrix mat( dat, 4, 4 ) ;
int i = threadIdx.x ;
vec[0] = i+.12 ;
vec[1] = i+1+.34 ;
vec[2] = i+2+.56 ;
vec[3] = i+3+.78 ;
res = mat.operate( vec ) ;
buf[4*i] = res[0] ;
buf[4*i+1] = res[1] ;
buf[4*i+2] = res[2] ;
buf[4*i+3] = res[3] ;
delete[] res ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
double buf[4*NUM_THREADS] ;
// device buffer
double* dbuf = NULL ;
hipDeviceProp_t devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( hipSetDevice( devID ) ) ;
checkCudaErrors( hipGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( hipMalloc( (void**) &dbuf, sizeof( double )*4*NUM_THREADS ) ) ;
// run kernel
hipLaunchKernelGGL(( realmatrix), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( hipMemcpy( buf, dbuf, sizeof( double )*4*NUM_THREADS, hipMemcpyDeviceToHost ) ) ;
checkCudaErrors( hipFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.4f %.4f %.4f %.4f\n", buf[4*i], buf[4*i+1], buf[4*i+2], buf[4*i+3] ) ;
return EXIT_SUCCESS ;
}
#endif // REALMATRIX_MAIN
|
fe704650e27af51848a6324b7c1ee9268fe1a26e.cu
|
#include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <cuda_runtime.h>
#include "RealMatrix.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ RealMatrix::RealMatrix( double matrix[], int rows, int cols ) {
set( matrix, rows, cols ) ;
}
__device__ RealMatrix::RealMatrix(
const double rc00, const double rc01, const double rc02,
const double rc10, const double rc11, const double rc12,
const double rc20, const double rc21, const double rc22 ) {
this->rows = 3 ;
this->cols = 3 ;
this->matrix = new double[9] {
rc00, rc01, rc02,
rc10, rc11, rc12,
rc20, rc21, rc22 } ;
}
__device__ RealMatrix::RealMatrix(
const double rc00, const double rc01, const double rc02, const double rc03,
const double rc10, const double rc11, const double rc12, const double rc13,
const double rc20, const double rc21, const double rc22, const double rc23,
const double rc30, const double rc31, const double rc32, const double rc33 ) {
this->rows = 4 ;
this->cols = 4 ;
this->matrix = new double[16] {
rc00, rc01, rc02, rc03,
rc10, rc11, rc12, rc13,
rc20, rc21, rc22, rc23,
rc30, rc31, rc32, rc33 } ;
}
__device__ RealMatrix::~RealMatrix() {
delete[] matrix ;
}
__device__ void RealMatrix::set( double matrix[], int rows, int cols ) {
int size = rows*cols ;
this->rows = rows ;
this->cols = cols ;
this->matrix = new double[size] ;
for ( int i=0 ; size>i ; i++ )
this->matrix[i] = matrix[i] ;
}
__device__ double* RealMatrix::operate( double vector[] ) {
double sum, *retval = new double[rows] ;
for ( int r=0 ; rows>r ; r++ ) {
sum = 0 ;
for ( int c=0 ; cols>c ; c++ )
sum += matrix[rows*r+c]*vector[c] ;
retval[r] = sum ;
}
return retval ;
}
#ifdef REALMATRIX_MAIN
// kernel
__global__ void realmatrix( double* buf ) {
double dat[] = {
10, 12, 12, 14,
21, 22, 23, 25,
32, 32, 34, 36,
43, 42, 45, 47
}, vec[4], *res ;
RealMatrix mat( dat, 4, 4 ) ;
int i = threadIdx.x ;
vec[0] = i+.12 ;
vec[1] = i+1+.34 ;
vec[2] = i+2+.56 ;
vec[3] = i+3+.78 ;
res = mat.operate( vec ) ;
buf[4*i] = res[0] ;
buf[4*i+1] = res[1] ;
buf[4*i+2] = res[2] ;
buf[4*i+3] = res[3] ;
delete[] res ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
double buf[4*NUM_THREADS] ;
// device buffer
double* dbuf = NULL ;
cudaDeviceProp devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( cudaSetDevice( devID ) ) ;
checkCudaErrors( cudaGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( cudaMalloc( (void**) &dbuf, sizeof( double )*4*NUM_THREADS ) ) ;
// run kernel
realmatrix<<<NUM_BLOCKS, NUM_THREADS>>>( dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( cudaMemcpy( buf, dbuf, sizeof( double )*4*NUM_THREADS, cudaMemcpyDeviceToHost ) ) ;
checkCudaErrors( cudaFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.4f %.4f %.4f %.4f\n", buf[4*i], buf[4*i+1], buf[4*i+2], buf[4*i+3] ) ;
return EXIT_SUCCESS ;
}
#endif // REALMATRIX_MAIN
|
55dcd06df84756185217dc02ab8425377d35d6ec.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
__global__
void incKernel(int *g_out, const int *g_in, int N, int inner_reps) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
for (int i = 0; i < inner_reps; ++i) {
g_out[idx] = (i == 0 ? g_in[idx] : g_out[idx]) + 1;
}
}
}
#define STREAM_COUNT 4
int *h_data_in[STREAM_COUNT];
int *d_data_in[STREAM_COUNT];
int *h_data_out[STREAM_COUNT];
int *d_data_out[STREAM_COUNT];
hipStream_t stream[STREAM_COUNT];
int N = 1 << 22;
int nreps = 10; // number of times each experiment is repeated
int inner_reps = 5; // loop iterations in the GPU kernel
int memsize;
dim3 block (256, 1, 1);
dim3 grid (N/256, 1, 1);
float processWithStreams(int streams_used);
bool check();
int main(int argc, char *argv[]) {
printf("Length of the array = %d\n", N);
memsize = N * sizeof(int);
// Allocate resources
for (int i = 0; i < STREAM_COUNT; ++i) {
hipHostMalloc(&h_data_in[i], memsize, hipHostMallocDefault);
hipHostMalloc(&h_data_out[i], memsize, hipHostMallocDefault);
hipMalloc(&d_data_in[i], memsize);
hipMemset(d_data_in[i], 0, memsize);
hipMalloc(&d_data_out[i], memsize);
hipStreamCreate(&stream[i]);
}
// initialize host memory
for (int i = 0; i < STREAM_COUNT; ++i) {
memset(h_data_in[i], 0, memsize);
}
// Process pipelined work
float serial_time = processWithStreams(1);
float overlap_time = processWithStreams(STREAM_COUNT);
printf("\nAverage measured timings over %d repetitions:\n", nreps);
printf(" Avg. time when execution fully serialized\t: %f ms\n",
serial_time / nreps);
printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT,
overlap_time / nreps);
printf(" Avg. speedup gained (serialized - overlapped)\t: %f\n",
(serial_time - overlap_time) / nreps);
printf("\nMeasured throughput:\n");
printf(" Fully serialized execution\t\t: %f GB/s\n",
(nreps * (memsize * 2e-6)) / serial_time);
printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT,
(nreps * (memsize * 2e-6)) / overlap_time);
// Verify the results, we will use the results for final output
bool bResults = check();
printf("\n%s\n", bResults ? "PASS" : "FAIL");
// Free resources
for (int i = 0; i < STREAM_COUNT; ++i) {
hipHostFree(h_data_in[i]);
hipFree(d_data_in[i]);
hipHostFree(h_data_out[i]);
hipFree(d_data_out[i]);
hipStreamDestroy(stream[i]);
}
// Test result
exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
float processWithStreams(int streams_used) {
int current_stream = 0;
auto start = std::chrono::steady_clock::now();
// Do processing in a loop
//
// Note: All memory commands are processed in the order they are issued,
// independent of the stream they are enqueued in. Hence the pattern by
// which the copy and kernel commands are enqueued in the stream
// has an influence on the achieved overlap.
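// Issue order used below: kernel for the current frame on its stream, then the
// asynchronous upload of the next frame on the next stream, then the asynchronous
// download of the current frame. On devices with separate copy engines this lets
// the copies and the kernels of neighbouring iterations overlap.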
for (int i = 0; i < nreps; ++i) {
int next_stream = (current_stream + 1) % streams_used;
// Process current frame
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, stream[current_stream],
d_data_out[current_stream], d_data_in[current_stream], N, inner_reps);
// Upload next frame
hipMemcpyAsync(d_data_in[next_stream], h_data_in[next_stream], memsize,
hipMemcpyHostToDevice, stream[next_stream]);
// Download current frame
hipMemcpyAsync(h_data_out[current_stream], d_data_out[current_stream], memsize,
hipMemcpyDeviceToHost, stream[current_stream]);
current_stream = next_stream;
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return (time * 1e-6f); // milliseconds
}
bool check() {
bool passed = true;
for (int j = 0; j < STREAM_COUNT; ++j) {
for (int i = 0; i < N; ++i) {
passed &= (h_data_out[j][i] == inner_reps);
}
}
return passed;
}
|
55dcd06df84756185217dc02ab8425377d35d6ec.cu
|
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda_runtime.h>
__global__
void incKernel(int *g_out, const int *g_in, int N, int inner_reps) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
for (int i = 0; i < inner_reps; ++i) {
g_out[idx] = (i == 0 ? g_in[idx] : g_out[idx]) + 1;
}
}
}
#define STREAM_COUNT 4
int *h_data_in[STREAM_COUNT];
int *d_data_in[STREAM_COUNT];
int *h_data_out[STREAM_COUNT];
int *d_data_out[STREAM_COUNT];
cudaStream_t stream[STREAM_COUNT];
int N = 1 << 22;
int nreps = 10; // number of times each experiment is repeated
int inner_reps = 5; // loop iterations in the GPU kernel
int memsize;
dim3 block (256, 1, 1);
dim3 grid (N/256, 1, 1);
float processWithStreams(int streams_used);
bool check();
int main(int argc, char *argv[]) {
printf("Length of the array = %d\n", N);
memsize = N * sizeof(int);
// Allocate resources
for (int i = 0; i < STREAM_COUNT; ++i) {
cudaHostAlloc(&h_data_in[i], memsize, cudaHostAllocDefault);
cudaHostAlloc(&h_data_out[i], memsize, cudaHostAllocDefault);
cudaMalloc(&d_data_in[i], memsize);
cudaMemset(d_data_in[i], 0, memsize);
cudaMalloc(&d_data_out[i], memsize);
cudaStreamCreate(&stream[i]);
}
// initialize host memory
for (int i = 0; i < STREAM_COUNT; ++i) {
memset(h_data_in[i], 0, memsize);
}
// Process pipelined work
float serial_time = processWithStreams(1);
float overlap_time = processWithStreams(STREAM_COUNT);
printf("\nAverage measured timings over %d repetitions:\n", nreps);
printf(" Avg. time when execution fully serialized\t: %f ms\n",
serial_time / nreps);
printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT,
overlap_time / nreps);
printf(" Avg. speedup gained (serialized - overlapped)\t: %f\n",
(serial_time - overlap_time) / nreps);
printf("\nMeasured throughput:\n");
printf(" Fully serialized execution\t\t: %f GB/s\n",
(nreps * (memsize * 2e-6)) / serial_time);
printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT,
(nreps * (memsize * 2e-6)) / overlap_time);
// Verify the results, we will use the results for final output
bool bResults = check();
printf("\n%s\n", bResults ? "PASS" : "FAIL");
// Free resources
for (int i = 0; i < STREAM_COUNT; ++i) {
cudaFreeHost(h_data_in[i]);
cudaFree(d_data_in[i]);
cudaFreeHost(h_data_out[i]);
cudaFree(d_data_out[i]);
cudaStreamDestroy(stream[i]);
}
// Test result
exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
float processWithStreams(int streams_used) {
int current_stream = 0;
auto start = std::chrono::steady_clock::now();
// Do processing in a loop
//
// Note: All memory commands are processed in the order they are issued,
// independent of the stream they are enqueued in. Hence the pattern by
// which the copy and kernel commands are enqueued in the stream
// has an influence on the achieved overlap.
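// Issue order used below: kernel for the current frame on its stream, then the
// asynchronous upload of the next frame on the next stream, then the asynchronous
// download of the current frame. On devices with separate copy engines this lets
// the copies and the kernels of neighbouring iterations overlap.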
for (int i = 0; i < nreps; ++i) {
int next_stream = (current_stream + 1) % streams_used;
// Process current frame
incKernel<<<grid, block, 0, stream[current_stream]>>>(
d_data_out[current_stream], d_data_in[current_stream], N, inner_reps);
// Upload next frame
cudaMemcpyAsync(d_data_in[next_stream], h_data_in[next_stream], memsize,
cudaMemcpyHostToDevice, stream[next_stream]);
// Download current frame
cudaMemcpyAsync(h_data_out[current_stream], d_data_out[current_stream], memsize,
cudaMemcpyDeviceToHost, stream[current_stream]);
current_stream = next_stream;
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return (time * 1e-6f); // milliseconds
}
bool check() {
bool passed = true;
for (int j = 0; j < STREAM_COUNT; ++j) {
for (int i = 0; i < N; ++i) {
passed &= (h_data_out[j][i] == inner_reps);
}
}
return passed;
}
|
03da63ef4f88989e946290af1a78292442e94ebc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zherk_fermi_batched_k32.cu normal z -> c, Fri Jan 30 19:00:10 2015
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
herk_stencil.cuh defines the GPU kernel (device function).
herk_kernel_batched.cuh defines the GPU kernel (global function).
The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cgemm_fermi_kernels_batched_k32.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CHERK performs one of the hermitian rank k operations
C := alpha*A*A**H + beta*C,
or
C := alpha*A**H*A + beta*C,
where alpha and beta are real scalars, C is an n by n hermitian
matrix and A is an n by k matrix in the first case and a k by n
matrix in the second case.
Parameters
----------
@param[in]
uplo CHARACTER*1.
On entry, uplo specifies whether the upper or lower
triangular part of the array C is to be referenced as
follows:
uplo = 'U' or 'u' Only the upper triangular part of C
is to be referenced.
uplo = 'L' or 'l' Only the lower triangular part of C
is to be referenced.
@param[in]
trans CHARACTER*1.
On entry, trans specifies the operation to be performed as
follows:
trans = 'N' or 'n' C := alpha*A*A**H + beta*C.
trans = 'C' or 'c' C := alpha*A**H*A + beta*C.
@param[in]
n INTEGER.
On entry, specifies the order of the matrix C. N must be
at least zero.
@param[in]
k INTEGER.
On entry with trans = 'N' or 'n', k specifies the number
of columns of the matrix A, and on entry with
trans = 'C' or 'c', k specifies the number of rows of the
matrix A. K must be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( ldda, ka ), where ka is
k when trans = MagmaNoTrans, and is n otherwise.
Before entry with trans = MagmaNoTrans, the leading n by k
part of the array dA must contain the matrix dA, otherwise
the leading k by n part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, ldda specifies the first dimension of A as declared
in the calling (sub) program. When trans = MagmaNoTrans then
ldda must be at least max( 1, n ), otherwise ldda must be at
least max( 1, k ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX array of DIMENSION ( lddc, n ).
Before entry with uplo = 'U' or 'u', the leading n by n
upper triangular part of the array C must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of C is not referenced. On exit, the
upper triangular part of the array C is overwritten by the
upper triangular part of the updated matrix.
Before entry with uplo = 'L' or 'l', the leading n by n
lower triangular part of the array C must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of C is not referenced. On exit, the
lower triangular part of the array C is overwritten by the
lower triangular part of the updated matrix.
Note that the imaginary parts of the diagonal elements need
not be set, they are assumed to be zero, and on exit they
are set to zero.
@param[in]
lddc INTEGER.
On entry, lddc specifies the first dimension of dC as declared
in the calling (sub) program. lddc must be at least
max( 1, n ).
@ingroup magma_cblas3
********************************************************************/
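// Illustrative reference only (not MAGMA code): a naive host-side rank-k
// update for the uplo = Lower, trans = MagmaNoTrans case documented above,
// i.e. C := alpha*A*A**H + beta*C with column-major A (lda x k) and
// C (ldc x n). The function name and layout are our assumptions; it is meant
// to clarify the semantics, not to be called by the driver below.
#include <complex>
static void cherk_reference_lower_notrans(
    int n, int k, float alpha, const std::complex<float> *A, int lda,
    float beta, std::complex<float> *C, int ldc)
{
    for (int j = 0; j < n; ++j) {
        for (int i = j; i < n; ++i) { // lower triangle only
            std::complex<float> acc(0.f, 0.f);
            for (int l = 0; l < k; ++l) {
                acc += A[i + l * lda] * std::conj(A[j + l * lda]);
            }
            C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
        }
        // As documented, the imaginary part of the diagonal is set to zero.
        C[j + j * ldc] = std::complex<float>(C[j + j * ldc].real(), 0.f);
    }
}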
extern "C" void
magmablas_cherk_batched_k32(
magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k,
float alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ldda,
float beta,
magmaFloatComplex **dC_array, magma_int_t lddc, magma_int_t batchCount, magma_queue_t queue )
{
magmaFloatComplex cbeta = MAGMA_C_MAKE( beta, 0. );
magmaFloatComplex calpha = MAGMA_C_MAKE( alpha, 0. );
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -1;
else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 )
info = -4;
else if ( trans == MagmaNoTrans ? ldda < n : ldda < k )
info = -7;
else if ( lddc < n )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("not supported \n"); // TODO call cublas
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
int TransA = 0, TransB = 0, uploA = 0;
if ( uplo == MagmaLower )
uploA = 1;
else if ( uplo == MagmaUpper )
uploA = 2;
if ( trans == MagmaNoTrans )
#if defined(PRECISION_z) || defined(PRECISION_c)
TransB = 2;
#else
TransB = 1;
#endif
else if ( trans == MagmaTrans || trans == MagmaConjTrans)
#if defined(PRECISION_z) || defined(PRECISION_c)
TransA = 2;
#else
TransA = 1;
#endif
#ifdef TEXTURE_1D
size_t sizeA = (size_t) ldda * (size_t) (!TransA ? k : n);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE )
{
printf("not supported \n"); // TODO call cublas
return;
}
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(magmaFloatComplex));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(magmaFloatComplex);
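// TransA/TransB encode which operand carries the (conjugate) transpose:
// for trans = MagmaNoTrans the update is A*A**H, so the second operand is
// (conjugate-)transposed (TransB = 2 for complex precisions, 1 for real);
// otherwise the first operand is (TransA = 2 or 1). The branches below pick
// the matching nt/nc/tn/cn kernel instantiation.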
if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (n - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_nt_batched), dim3(dimGrid), dim3(dimBlock), 0, queue ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (n - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_nc_batched), dim3(dimGrid), dim3(dimBlock), 0, queue ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_tn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_cn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_A );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
03da63ef4f88989e946290af1a78292442e94ebc.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zherk_fermi_batched_k32.cu normal z -> c, Fri Jan 30 19:00:10 2015
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
herk_stencil.cuh defines the GPU kernel (device function).
herk_kernel_batched.cuh defines the GPU kernel (global function).
The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cgemm_fermi_kernels_batched_k32.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CHERK performs one of the hermitian rank k operations
C := alpha*A*A**H + beta*C,
or
C := alpha*A**H*A + beta*C,
where alpha and beta are real scalars, C is an n by n hermitian
matrix and A is an n by k matrix in the first case and a k by n
matrix in the second case.
Parameters
----------
@param[in]
uplo CHARACTER*1.
On entry, uplo specifies whether the upper or lower
triangular part of the array C is to be referenced as
follows:
uplo = 'U' or 'u' Only the upper triangular part of C
is to be referenced.
uplo = 'L' or 'l' Only the lower triangular part of C
is to be referenced.
@param[in]
trans CHARACTER*1.
On entry, trans specifies the operation to be performed as
follows:
trans = 'N' or 'n' C := alpha*A*A**H + beta*C.
trans = 'C' or 'c' C := alpha*A**H*A + beta*C.
@param[in]
n INTEGER.
On entry, specifies the order of the matrix C. N must be
at least zero.
@param[in]
k INTEGER.
On entry with trans = 'N' or 'n', k specifies the number
of columns of the matrix A, and on entry with
trans = 'C' or 'c', k specifies the number of rows of the
matrix A. K must be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( ldda, ka ), where ka is
k when trans = MagmaNoTrans, and is n otherwise.
Before entry with trans = MagmaNoTrans, the leading n by k
part of the array dA must contain the matrix dA, otherwise
the leading k by n part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, ldda specifies the first dimension of A as declared
in the calling (sub) program. When trans = MagmaNoTrans then
ldda must be at least max( 1, n ), otherwise ldda must be at
least max( 1, k ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX array of DIMENSION ( lddc, n ).
Before entry with uplo = 'U' or 'u', the leading n by n
upper triangular part of the array C must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of C is not referenced. On exit, the
upper triangular part of the array C is overwritten by the
upper triangular part of the updated matrix.
Before entry with uplo = 'L' or 'l', the leading n by n
lower triangular part of the array C must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of C is not referenced. On exit, the
lower triangular part of the array C is overwritten by the
lower triangular part of the updated matrix.
Note that the imaginary parts of the diagonal elements need
not be set, they are assumed to be zero, and on exit they
are set to zero.
@param[in]
lddc INTEGER.
On entry, lddc specifies the first dimension of dC as declared
in the calling (sub) program. lddc must be at least
max( 1, n ).
@ingroup magma_cblas3
********************************************************************/
extern "C" void
magmablas_cherk_batched_k32(
magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k,
float alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ldda,
float beta,
magmaFloatComplex **dC_array, magma_int_t lddc, magma_int_t batchCount, magma_queue_t queue )
{
magmaFloatComplex cbeta = MAGMA_C_MAKE( beta, 0. );
magmaFloatComplex calpha = MAGMA_C_MAKE( alpha, 0. );
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -1;
else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 )
info = -4;
else if ( trans == MagmaNoTrans ? ldda < n : ldda < k )
info = -7;
else if ( lddc < n )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("not supported \n"); // TODO call cublas
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
int TransA = 0, TransB = 0, uploA = 0;
if ( uplo == MagmaLower )
uploA = 1;
else if ( uplo == MagmaUpper )
uploA = 2;
if ( trans == MagmaNoTrans )
#if defined(PRECISION_z) || defined(PRECISION_c)
TransB = 2;
#else
TransB = 1;
#endif
else if ( trans == MagmaTrans || trans == MagmaConjTrans)
#if defined(PRECISION_z) || defined(PRECISION_c)
TransA = 2;
#else
TransA = 1;
#endif
#ifdef TEXTURE_1D
size_t sizeA = (size_t) ldda * (size_t) (!TransA ? k : n);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE )
{
printf("not supported \n"); // TODO call cublas
return;
}
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(magmaFloatComplex));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(magmaFloatComplex);
if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (n - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_nt_batched<<< dimGrid, dimBlock, 0, queue >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (n - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_nc_batched<<< dimGrid, dimBlock, 0, queue >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_tn_batched<<< dimGrid, dimBlock, 0, queue >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_cn_batched<<< dimGrid, dimBlock, 0, queue >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_A );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
f635375ac98dd827ec662f63ff82f17a14d369d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "brick-cuda.h"
#include "head.h"
#include "headcu.h"
#define out(i, j, k) out_arr[k][j][i]
#define in(i, j, k) in_arr[k][j][i]
__global__ void
arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) {
auto in_arr = (bElem (*)[STRIDE][STRIDE]) in_ptr;
auto out_arr = (bElem (*)[STRIDE][STRIDE]) out_ptr;
#include "arrcusched.h"
{
#include "kernel.h"
}
}
#undef out
#undef in
__global__ void
brick_kernel(unsigned (*grid)[STRIDE/TILEJ][STRIDE/TILEI], Brick3D in, Brick3D out, bElem *c) {
#include "bricusched.h"
brick("kernel.py", BVEC, (TILEK, TILEJ, TILEI), (BFOLD), b);
}
int main() {
// allocations
bElem *c = randomArray({7});
bElem *c_dev;
copyToDevice({7}, c_dev, c);
auto in_arr = randomArray({STRIDE, STRIDE, STRIDE});
bElem *in_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, in_dev, in_arr);
auto out_arr = zeroArray({STRIDE, STRIDE, STRIDE});
bElem *out_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, out_dev, out_arr);
{
auto compute = [&]() -> void {
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(_TILEI, _TILEJ, _TILEK);
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread) , 0, 0, in_dev, out_dev, c_dev);
};
#ifndef TYPE
#include "cutiming.h"
#else
compute();
#endif
copyFromDevice({STRIDE, STRIDE, STRIDE}, out_arr, out_dev);
}
#if TYPE == 1
{
unsigned *grid_ptr;
unsigned bSize = TILEK * TILEJ * TILEI;
auto bInfo = init_grid<3>(grid_ptr, {STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI});
unsigned *grid_dev;
copyToDevice({STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr);
auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize);
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
hipMalloc(&bInfo_dev, size);
hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice);
}
copyBrick<3>({STRIDE, STRIDE, STRIDE}, in_arr, grid_ptr, in_bri);
BrickStorage *bStorage_dev;
BrickStorage _bStorage_dev = movBrickStorage(bStorage, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickStorage);
hipMalloc(&bStorage_dev, size);
hipMemcpy(bStorage_dev, &_bStorage_dev, size, hipMemcpyHostToDevice);
}
auto compute = [&]() -> void {
Brick3D bIn(bInfo_dev, &_bStorage_dev, 0);
Brick3D bOut(bInfo_dev, &_bStorage_dev, bSize);
bIn.bStorage = bStorage_dev;
bOut.bStorage = bStorage_dev;
auto grid = (unsigned (*)[STRIDE/TILEJ][STRIDE/TILEI]) grid_dev;
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(32);
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread) , 0, 0, grid, bIn, bOut, c_dev);
};
#include "cutiming.h"
hipDeviceSynchronize();
hipMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), hipMemcpyDeviceToHost);
if (!compareBrick<3>({STRIDE, STRIDE, STRIDE}, out_arr, grid_ptr, out_bri))
return 1;
}
#endif
return 0;
}
|
f635375ac98dd827ec662f63ff82f17a14d369d9.cu
|
#include "brick-cuda.h"
#include "head.h"
#include "headcu.h"
#define out(i, j, k) out_arr[k][j][i]
#define in(i, j, k) in_arr[k][j][i]
__global__ void
arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) {
auto in_arr = (bElem (*)[STRIDE][STRIDE]) in_ptr;
auto out_arr = (bElem (*)[STRIDE][STRIDE]) out_ptr;
#include "arrcusched.h"
{
#include "kernel.h"
}
}
#undef out
#undef in
__global__ void
brick_kernel(unsigned (*grid)[STRIDE/TILEJ][STRIDE/TILEI], Brick3D in, Brick3D out, bElem *c) {
#include "bricusched.h"
brick("kernel.py", BVEC, (TILEK, TILEJ, TILEI), (BFOLD), b);
}
int main() {
// allocations
bElem *c = randomArray({7});
bElem *c_dev;
copyToDevice({7}, c_dev, c);
auto in_arr = randomArray({STRIDE, STRIDE, STRIDE});
bElem *in_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, in_dev, in_arr);
auto out_arr = zeroArray({STRIDE, STRIDE, STRIDE});
bElem *out_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, out_dev, out_arr);
{
auto compute = [&]() -> void {
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(_TILEI, _TILEJ, _TILEK);
arr_kernel<<< block, thread >>>(in_dev, out_dev, c_dev);
};
#ifndef TYPE
#include "cutiming.h"
#else
compute();
#endif
copyFromDevice({STRIDE, STRIDE, STRIDE}, out_arr, out_dev);
}
#if TYPE == 1
{
unsigned *grid_ptr;
unsigned bSize = TILEK * TILEJ * TILEI;
auto bInfo = init_grid<3>(grid_ptr, {STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI});
unsigned *grid_dev;
copyToDevice({STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr);
auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize);
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
cudaMalloc(&bInfo_dev, size);
cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice);
}
copyBrick<3>({STRIDE, STRIDE, STRIDE}, in_arr, grid_ptr, in_bri);
BrickStorage *bStorage_dev;
BrickStorage _bStorage_dev = movBrickStorage(bStorage, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickStorage);
cudaMalloc(&bStorage_dev, size);
cudaMemcpy(bStorage_dev, &_bStorage_dev, size, cudaMemcpyHostToDevice);
}
auto compute = [&]() -> void {
Brick3D bIn(bInfo_dev, &_bStorage_dev, 0);
Brick3D bOut(bInfo_dev, &_bStorage_dev, bSize);
bIn.bStorage = bStorage_dev;
bOut.bStorage = bStorage_dev;
auto grid = (unsigned (*)[STRIDE/TILEJ][STRIDE/TILEI]) grid_dev;
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(32);
brick_kernel<<< block, thread >>>(grid, bIn, bOut, c_dev);
};
#include "cutiming.h"
cudaDeviceSynchronize();
cudaMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), cudaMemcpyDeviceToHost);
if (!compareBrick<3>({STRIDE, STRIDE, STRIDE}, out_arr, grid_ptr, out_bri))
return 1;
}
#endif
return 0;
}
|
7e0223a498b5da38fb82770a47157b5faccef00f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_cl.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *nnz_num = NULL;
hipMalloc(&nnz_num, XSIZE*YSIZE);
int *cl = NULL;
hipMalloc(&cl, XSIZE*YSIZE);
int chunk = 1;
int pad_M = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(set_cl, dim3(gridBlock), dim3(threadBlock), 0, 0, nnz_num, cl, chunk, pad_M);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(set_cl, dim3(gridBlock), dim3(threadBlock), 0, 0, nnz_num, cl, chunk, pad_M);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(set_cl, dim3(gridBlock), dim3(threadBlock), 0, 0, nnz_num, cl, chunk, pad_M);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
7e0223a498b5da38fb82770a47157b5faccef00f.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_cl.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *nnz_num = NULL;
cudaMalloc(&nnz_num, XSIZE*YSIZE);
int *cl = NULL;
cudaMalloc(&cl, XSIZE*YSIZE);
int chunk = 1;
int pad_M = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
set_cl<<<gridBlock,threadBlock>>>(nnz_num,cl,chunk,pad_M);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_cl<<<gridBlock,threadBlock>>>(nnz_num,cl,chunk,pad_M);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_cl<<<gridBlock,threadBlock>>>(nnz_num,cl,chunk,pad_M);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3be2646401db1a8d860683b77b10d3f1e1a1aae4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <algorithm>
#include "private.h"
#include "metric_abstraction.h"
#include "tricks.cuh"
#define CLUSTER_DISTANCES_BLOCK_SIZE 512
#define CLUSTER_DISTANCES_SHMEM 12288 // in float-s
#define CLUSTER_RADIUSES_BLOCK_SIZE 512
#define CLUSTER_RADIUSES_SHMEM 8192 // in float-s
#define KNN_BLOCK_SIZE_SHMEM 512
#define KNN_BLOCK_SIZE_GMEM 1024
__constant__ uint32_t d_samples_size;
__constant__ uint32_t d_clusters_size;
__device__ unsigned long long int d_dists_calced;
/// sample_dists must be zeroed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_radiuses(
uint32_t offset, uint32_t length, const uint32_t *__restrict__ inv_asses,
const uint32_t *__restrict__ inv_asses_offsets,
const F *__restrict__ centroids, const F *__restrict__ samples,
float *__restrict__ sample_dists, float *__restrict__ radiuses) {
volatile uint32_t ci = blockIdx.x * blockDim.x + threadIdx.x;
if (ci >= length) {
return;
}
ci += offset;
// stage 1 - accumulate partial distances for every sample
__shared__ F shcents[CLUSTER_RADIUSES_SHMEM];
volatile const int cent_step = min(
CLUSTER_RADIUSES_SHMEM / blockDim.x, static_cast<unsigned>(d_features_size));
F *volatile const my_cent = shcents + cent_step * threadIdx.x;
for (int cfi = 0; cfi < d_features_size; cfi += cent_step) {
const int fsize = min(cent_step, d_features_size - cfi);
for (int f = 0; f < fsize; f++) {
my_cent[f] = centroids[ci * d_features_size + cfi + f];
}
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
uint64_t sample = inv_asses[ass]; // uint64_t!
sample_dists[sample] += METRIC<M, F>::partial_t(
samples, my_cent, fsize, d_samples_size, cfi, sample);
}
}
// stage 2 - find the maximum distance
float max_dist = -1;
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
float dist = METRIC<M, F>::finalize(sample_dists[inv_asses[ass]]);
if (dist > max_dist) {
max_dist = dist;
}
}
radiuses[ci] = max_dist > -1? max_dist : NAN;
}
/// distances must be zeroed!
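/// Each thread block computes one (x, y) tile of the symmetric centroid
/// distance matrix. The linear block index (blockIdx.x + offset) is decoded
/// into triangular tile coordinates so that only one triangle of the tile
/// grid is evaluated; knn_mirror_cluster_distances later copies the result
/// into the opposite triangle.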
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_distances(
uint32_t offset, const F *__restrict__ centroids, float *distances) {
volatile const uint32_t bi = blockIdx.x + offset;
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * bi);
y = tmp - d;
x = bi + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
__shared__ F shcents[CLUSTER_DISTANCES_SHMEM];
const uint32_t fstep = CLUSTER_DISTANCES_SHMEM / bs;
F *volatile my_cent = shcents + fstep * threadIdx.x;
// stage 1 - accumulate distances
for (uint16_t fpos = 0; fpos < d_features_size; fpos += fstep) {
__syncthreads();
const uint16_t fsize = min(
fstep, static_cast<uint32_t>(d_features_size - fpos));
uint32_t cbase = x * bs + threadIdx.x;
if (cbase < d_clusters_size) {
for (uint16_t f = 0; f < fsize; f++) {
my_cent[f] = centroids[cbase * d_features_size + fpos + f];
}
}
__syncthreads();
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
auto other_cent = d_clusters_size <= bs?
shcents + (y * bs + threadIdx.x) * fstep
:
centroids + (y * bs + threadIdx.x) * d_features_size + fpos;
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti] +=
METRIC<M, F>::partial(other_cent, shcents + ti * fstep, fsize);
}
}
}
// stage 2 - finalize the distances
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
uint32_t di = (y * bs + threadIdx.x) * d_clusters_size + x * bs + ti;
float dist = distances[di];
dist = METRIC<M, F>::finalize(dist);
distances[di] = dist;
}
}
}
__global__ void knn_mirror_cluster_distances(float *__restrict__ distances) {
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * blockIdx.x);
y = tmp - d;
x = blockIdx.x + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size && (x * bs + ti) < d_clusters_size) {
distances[(x * bs + ti) * d_clusters_size + y * bs + threadIdx.x] =
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti];
}
}
}
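// push_sample() maintains a binary max-heap of k (distance, index) pairs,
// stored interleaved in `heap` as [dist0, idx0, dist1, idx1, ...]; each index
// is a uint32_t bit pattern kept in a float slot. heap[0] holds the largest of
// the k best distances (the current k-th nearest), so callers compare a
// candidate against it and, when it qualifies, sift the new pair down from the
// root to restore the heap property.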
FPATTR void push_sample(uint16_t k, float dist, uint32_t index, float *heap) {
uint16_t pos = 0;
while (true) {
float left, right;
bool left_le, right_le;
if ((2 * pos + 1) < k) {
left = heap[4 * pos + 2];
left_le = dist >= left;
} else {
left_le = true;
}
if ((2 * pos + 2) < k) {
right = heap[4 * pos + 4];
right_le = dist >= right;
} else {
right_le = true;
}
if (left_le && right_le) {
heap[2 * pos] = dist;
*reinterpret_cast<uint32_t *>(heap + 2 * pos + 1) = index;
break;
}
if (!left_le && !right_le) {
if (left <= right) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
} else if (left_le) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_shmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
extern __shared__ float buffer[];
float *volatile mynearest = buffer + k * 2 * threadIdx.x;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
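// Prune whole clusters with the triangle inequality: the closest any member of
// cluster `cls` can be to this sample is at least
// dist(centroid_cls, centroid_mycls) - dist(sample, centroid_mycls) - radius(cls);
// if that lower bound already exceeds the current k-th nearest distance
// (mndist), the cluster cannot contribute a neighbor and is skipped.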
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
uint32_t pos_start = inv_asses_offsets[cls];
uint32_t pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = k - 1; i >= 0; i--) {
neighbors[(sample - offset) * k + i] = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k, -1, UINT32_MAX, mynearest);
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_gmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
float *volatile mynearest =
reinterpret_cast<float*>(neighbors) + (sample - offset) * k * 2;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
pos_start = inv_asses_offsets[cls];
pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = 0; i < k; i++) {
uint32_t imax = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k - i - 1, mynearest[2 * k - 2 * i - 2],
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1],
mynearest);
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1] = imax;
}
for (int i = 0; i < k; i++) {
reinterpret_cast<uint32_t*>(mynearest)[i] =
reinterpret_cast<uint32_t*>(mynearest)[2 * i + 1];
}
}
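// The gmem variant stores interleaved (distance, index) pairs directly in the
// `neighbors` buffer (allocated with the 2x multiplier reported by
// knn_cuda_neighbors_mem_multiplier). After knn_assign_gmem has compacted the
// k indices into the first k uint32 slots of each sample's 2k-slot region, the
// two kernels below pack them into a dense sample*k layout: odd samples are
// moved in place, even samples are first staged in the upper half of the
// buffer and then copied back by the second kernel.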
__global__ void knn_assign_gmem_deinterleave1(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
if (sample % 2 == 1) {
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[sample * 2 * k + i];
}
} else {
for (int i = 0; i < k; i++) {
neighbors[(length + sample) * k + k + i] = neighbors[sample * 2 * k + i];
}
}
}
__global__ void knn_assign_gmem_deinterleave2(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
sample *= 2;
if (sample >= length) {
return;
}
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[(length + sample) * k + k + i];
}
}
extern "C" {
KMCUDAResult knn_cuda_setup(
uint32_t h_samples_size, uint16_t h_features_size, uint32_t h_clusters_size,
const std::vector<int> &devs, int32_t verbosity) {
FOR_EACH_DEV(
CUCH(hipMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)),
kmcudaMemoryCopyError);
CUCH(hipMemcpyToSymbol(d_features_size, &h_features_size, sizeof(h_features_size)),
kmcudaMemoryCopyError);
CUCH(hipMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)),
kmcudaMemoryCopyError);
uint64_t zero = 0;
CUCH(hipMemcpyToSymbol(d_dists_calced, &zero, sizeof(d_dists_calced)),
kmcudaMemoryCopyError);
);
return kmcudaSuccess;
}
int knn_cuda_neighbors_mem_multiplier(uint16_t k, int dev, int verbosity) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props, dev);
int shmem_size = static_cast<int>(props.sharedMemPerBlock);
int needed_shmem_size = KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t);
if (needed_shmem_size > shmem_size) {
INFO("device #%d: needed shmem size %d > %d => using global memory\n",
dev, needed_shmem_size, shmem_size);
return 2;
}
return 1;
}
KMCUDAResult knn_cuda_calc(
uint16_t k, uint32_t h_samples_size, uint32_t h_clusters_size,
uint16_t h_features_size, KMCUDADistanceMetric metric,
const std::vector<int> &devs, int fp16x2, int verbosity,
const udevptrs<float> &samples, const udevptrs<float> ¢roids,
const udevptrs<uint32_t> &assignments, const udevptrs<uint32_t> &inv_asses,
const udevptrs<uint32_t> &inv_asses_offsets, udevptrs<float> *distances,
udevptrs<float>* sample_dists, udevptrs<float> *radiuses,
udevptrs<uint32_t> *neighbors) {
auto plan = distribute(h_clusters_size, h_features_size * sizeof(float), devs);
if (verbosity > 1) {
print_plan("plan_calc_radiuses", plan);
}
INFO("calculating the cluster radiuses...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_RADIUSES_BLOCK_SIZE, 1, 1);
dim3 grid(upper(h_clusters_size, block.x), 1, 1);
float *dsd;
if (h_clusters_size * h_clusters_size >= h_samples_size) {
dsd = (*distances)[devi].get();
} else {
dsd = (*sample_dists)[devi].get();
}
hipLaunchKernelGGL(( KERNEL_SWITCH(knn_calc_cluster_radiuses), , dim3(grid), dim3(block), 0, 0,
offset, length, inv_asses[devi].get(), inv_asses_offsets[devi].get(),
reinterpret_cast<const F*>(centroids[devi].get()),
reinterpret_cast<const F*>(samples[devi].get()),
dsd, (*radiuses)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
FOR_OTHER_DEVS(
CUP2P(radiuses, offset, length);
);
);
if (h_clusters_size * h_clusters_size >= h_samples_size) {
CUMEMSET_ASYNC(*distances, 0, h_samples_size);
}
uint32_t dist_blocks_dim = upper(
h_clusters_size, static_cast<uint32_t>(CLUSTER_DISTANCES_BLOCK_SIZE));
uint32_t dist_blocks_n = (2 * dist_blocks_dim + 1) * (2 * dist_blocks_dim + 1) / 8;
plan = distribute(dist_blocks_n, 512, devs);
{ // align across CLUSTER_DISTANCES_BLOCK_SIZE horizontal boundaries
uint32_t align = 0;
for (auto& p : plan) {
uint32_t offset, length;
std::tie(offset, length) = p;
offset += align;
std::get<0>(p) = offset;
uint32_t n = dist_blocks_dim;
float tmp = n + 0.5;
float d = sqrt(tmp * tmp - 2 * (offset + length));
uint32_t y = tmp - d;
uint32_t x = offset + length + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
if (x > 0) {
align = n - y - x;
std::get<1>(p) += align;
}
}
}
if (verbosity > 1) {
print_plan("plan_calc_cluster_distances", plan);
}
INFO("calculating the centroid distance matrix...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(length, 1, 1);
hipLaunchKernelGGL(( KERNEL_SWITCH(knn_calc_cluster_distances), , dim3(grid), dim3(block), 0, 0,
offset, reinterpret_cast<const F*>(centroids[devi].get()),
(*distances)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t y_start, y_finish;
{
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
float tmp = dist_blocks_dim + 0.5;
float d = sqrt(tmp * tmp - 2 * offset);
y_start = tmp - d;
d = sqrt(tmp * tmp - 2 * (offset + length));
y_finish = tmp - d;
}
if (y_finish == y_start) {
continue;
}
uint32_t p_offset = y_start * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t p_size = (y_finish - y_start) * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
p_size = ::min(p_size, h_clusters_size * h_clusters_size - p_offset);
FOR_OTHER_DEVS(
CUP2P(distances, p_offset, p_size);
);
);
FOR_EACH_DEVI(
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(dist_blocks_n, 1, 1);
hipLaunchKernelGGL(( knn_mirror_cluster_distances), dim3(grid), dim3(block), 0, 0, (*distances)[devi].get());
);
plan = distribute(h_samples_size, h_features_size * sizeof(float), devs);
INFO("searching for the nearest neighbors...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (knn_cuda_neighbors_mem_multiplier(k, devs[devi], 1) == 2) {
dim3 block(KNN_BLOCK_SIZE_GMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
hipLaunchKernelGGL(( KERNEL_SWITCH(knn_assign_gmem), , dim3(grid), dim3(block), 0, 0,
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
hipLaunchKernelGGL(( knn_assign_gmem_deinterleave1), dim3(grid), dim3(block), 0, 0,
length, k, (*neighbors)[devi].get());
dim3 grid2(upper(h_samples_size, 2 * block.x), 1, 1);
hipLaunchKernelGGL(( knn_assign_gmem_deinterleave2), dim3(grid2), dim3(block), 0, 0,
length, k, (*neighbors)[devi].get());
} else {
dim3 block(KNN_BLOCK_SIZE_SHMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(
hipLaunchKernelGGL(( knn_assign_shmem),
, dim3(grid), dim3(block), KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t), 0,
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
}
);
uint64_t dists_calced = 0;
FOR_EACH_DEV(
uint64_t h_dists_calced = 0;
CUCH(hipMemcpyFromSymbol(&h_dists_calced, d_dists_calced, sizeof(h_dists_calced)),
kmcudaMemoryCopyError);
DEBUG("#%d dists_calced: %" PRIu64 "\n", dev, h_dists_calced);
dists_calced += h_dists_calced;
);
uint64_t max_dists_calced = static_cast<uint64_t>(h_samples_size) * h_samples_size;
INFO("calculated %f of all the distances\n", (dists_calced + .0) / max_dists_calced);
return kmcudaSuccess;
}
} // extern "C"
|
3be2646401db1a8d860683b77b10d3f1e1a1aae4.cu
|
#include <cfloat>
#include <algorithm>
#include "private.h"
#include "metric_abstraction.h"
#include "tricks.cuh"
#define CLUSTER_DISTANCES_BLOCK_SIZE 512
#define CLUSTER_DISTANCES_SHMEM 12288 // in float-s
#define CLUSTER_RADIUSES_BLOCK_SIZE 512
#define CLUSTER_RADIUSES_SHMEM 8192 // in float-s
#define KNN_BLOCK_SIZE_SHMEM 512
#define KNN_BLOCK_SIZE_GMEM 1024
__constant__ uint32_t d_samples_size;
__constant__ uint32_t d_clusters_size;
__device__ unsigned long long int d_dists_calced;
/// sample_dists must be zeroed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_radiuses(
uint32_t offset, uint32_t length, const uint32_t *__restrict__ inv_asses,
const uint32_t *__restrict__ inv_asses_offsets,
const F *__restrict__ centroids, const F *__restrict__ samples,
float *__restrict__ sample_dists, float *__restrict__ radiuses) {
volatile uint32_t ci = blockIdx.x * blockDim.x + threadIdx.x;
if (ci >= length) {
return;
}
ci += offset;
// stage 1 - accumulate partial distances for every sample
__shared__ F shcents[CLUSTER_RADIUSES_SHMEM];
volatile const int cent_step = min(
CLUSTER_RADIUSES_SHMEM / blockDim.x, static_cast<unsigned>(d_features_size));
F *volatile const my_cent = shcents + cent_step * threadIdx.x;
for (int cfi = 0; cfi < d_features_size; cfi += cent_step) {
const int fsize = min(cent_step, d_features_size - cfi);
for (int f = 0; f < fsize; f++) {
my_cent[f] = centroids[ci * d_features_size + cfi + f];
}
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
uint64_t sample = inv_asses[ass]; // uint64_t!
sample_dists[sample] += METRIC<M, F>::partial_t(
samples, my_cent, fsize, d_samples_size, cfi, sample);
}
}
// stage 2 - find the maximum distance
float max_dist = -1;
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
float dist = METRIC<M, F>::finalize(sample_dists[inv_asses[ass]]);
if (dist > max_dist) {
max_dist = dist;
}
}
radiuses[ci] = max_dist > -1? max_dist : NAN;
}
/// distances must be zeroed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_distances(
uint32_t offset, const F *__restrict__ centroids, float *distances) {
volatile const uint32_t bi = blockIdx.x + offset;
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * bi);
y = tmp - d;
x = bi + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
__shared__ F shcents[CLUSTER_DISTANCES_SHMEM];
const uint32_t fstep = CLUSTER_DISTANCES_SHMEM / bs;
F *volatile my_cent = shcents + fstep * threadIdx.x;
// stage 1 - accumulate distances
for (uint16_t fpos = 0; fpos < d_features_size; fpos += fstep) {
__syncthreads();
const uint16_t fsize = min(
fstep, static_cast<uint32_t>(d_features_size - fpos));
uint32_t cbase = x * bs + threadIdx.x;
if (cbase < d_clusters_size) {
for (uint16_t f = 0; f < fsize; f++) {
my_cent[f] = centroids[cbase * d_features_size + fpos + f];
}
}
__syncthreads();
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
auto other_cent = d_clusters_size <= bs?
shcents + (y * bs + threadIdx.x) * fstep
:
centroids + (y * bs + threadIdx.x) * d_features_size + fpos;
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti] +=
METRIC<M, F>::partial(other_cent, shcents + ti * fstep, fsize);
}
}
}
// stage 2 - finalize the distances
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
uint32_t di = (y * bs + threadIdx.x) * d_clusters_size + x * bs + ti;
float dist = distances[di];
dist = METRIC<M, F>::finalize(dist);
distances[di] = dist;
}
}
}
__global__ void knn_mirror_cluster_distances(float *__restrict__ distances) {
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * blockIdx.x);
y = tmp - d;
x = blockIdx.x + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size && (x * bs + ti) < d_clusters_size) {
distances[(x * bs + ti) * d_clusters_size + y * bs + threadIdx.x] =
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti];
}
}
}
FPATTR void push_sample(uint16_t k, float dist, uint32_t index, float *heap) {
uint16_t pos = 0;
while (true) {
float left, right;
bool left_le, right_le;
if ((2 * pos + 1) < k) {
left = heap[4 * pos + 2];
left_le = dist >= left;
} else {
left_le = true;
}
if ((2 * pos + 2) < k) {
right = heap[4 * pos + 4];
right_le = dist >= right;
} else {
right_le = true;
}
if (left_le && right_le) {
heap[2 * pos] = dist;
*reinterpret_cast<uint32_t *>(heap + 2 * pos + 1) = index;
break;
}
if (!left_le && !right_le) {
if (left <= right) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
} else if (left_le) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_shmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
extern __shared__ float buffer[];
float *volatile mynearest = buffer + k * 2 * threadIdx.x;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
uint32_t pos_start = inv_asses_offsets[cls];
uint32_t pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = k - 1; i >= 0; i--) {
neighbors[(sample - offset) * k + i] = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k, -1, UINT32_MAX, mynearest);
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_gmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
float *volatile mynearest =
reinterpret_cast<float*>(neighbors) + (sample - offset) * k * 2;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
pos_start = inv_asses_offsets[cls];
pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = 0; i < k; i++) {
uint32_t imax = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k - i - 1, mynearest[2 * k - 2 * i - 2],
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1],
mynearest);
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1] = imax;
}
for (int i = 0; i < k; i++) {
reinterpret_cast<uint32_t*>(mynearest)[i] =
reinterpret_cast<uint32_t*>(mynearest)[2 * i + 1];
}
}
__global__ void knn_assign_gmem_deinterleave1(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
if (sample % 2 == 1) {
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[sample * 2 * k + i];
}
} else {
for (int i = 0; i < k; i++) {
neighbors[(length + sample) * k + k + i] = neighbors[sample * 2 * k + i];
}
}
}
__global__ void knn_assign_gmem_deinterleave2(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
sample *= 2;
if (sample >= length) {
return;
}
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[(length + sample) * k + k + i];
}
}
extern "C" {
KMCUDAResult knn_cuda_setup(
uint32_t h_samples_size, uint16_t h_features_size, uint32_t h_clusters_size,
const std::vector<int> &devs, int32_t verbosity) {
FOR_EACH_DEV(
CUCH(cudaMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)),
kmcudaMemoryCopyError);
CUCH(cudaMemcpyToSymbol(d_features_size, &h_features_size, sizeof(h_features_size)),
kmcudaMemoryCopyError);
CUCH(cudaMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)),
kmcudaMemoryCopyError);
uint64_t zero = 0;
CUCH(cudaMemcpyToSymbol(d_dists_calced, &zero, sizeof(d_dists_calced)),
kmcudaMemoryCopyError);
);
return kmcudaSuccess;
}
int knn_cuda_neighbors_mem_multiplier(uint16_t k, int dev, int verbosity) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
int shmem_size = static_cast<int>(props.sharedMemPerBlock);
int needed_shmem_size = KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t);
if (needed_shmem_size > shmem_size) {
INFO("device #%d: needed shmem size %d > %d => using global memory\n",
dev, needed_shmem_size, shmem_size);
return 2;
}
return 1;
}
KMCUDAResult knn_cuda_calc(
uint16_t k, uint32_t h_samples_size, uint32_t h_clusters_size,
uint16_t h_features_size, KMCUDADistanceMetric metric,
const std::vector<int> &devs, int fp16x2, int verbosity,
const udevptrs<float> &samples, const udevptrs<float> ¢roids,
const udevptrs<uint32_t> &assignments, const udevptrs<uint32_t> &inv_asses,
const udevptrs<uint32_t> &inv_asses_offsets, udevptrs<float> *distances,
udevptrs<float>* sample_dists, udevptrs<float> *radiuses,
udevptrs<uint32_t> *neighbors) {
auto plan = distribute(h_clusters_size, h_features_size * sizeof(float), devs);
if (verbosity > 1) {
print_plan("plan_calc_radiuses", plan);
}
INFO("calculating the cluster radiuses...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_RADIUSES_BLOCK_SIZE, 1, 1);
dim3 grid(upper(h_clusters_size, block.x), 1, 1);
float *dsd;
if (h_clusters_size * h_clusters_size >= h_samples_size) {
dsd = (*distances)[devi].get();
} else {
dsd = (*sample_dists)[devi].get();
}
KERNEL_SWITCH(knn_calc_cluster_radiuses, <<<grid, block>>>(
offset, length, inv_asses[devi].get(), inv_asses_offsets[devi].get(),
reinterpret_cast<const F*>(centroids[devi].get()),
reinterpret_cast<const F*>(samples[devi].get()),
dsd, (*radiuses)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
FOR_OTHER_DEVS(
CUP2P(radiuses, offset, length);
);
);
if (h_clusters_size * h_clusters_size >= h_samples_size) {
CUMEMSET_ASYNC(*distances, 0, h_samples_size);
}
uint32_t dist_blocks_dim = upper(
h_clusters_size, static_cast<uint32_t>(CLUSTER_DISTANCES_BLOCK_SIZE));
uint32_t dist_blocks_n = (2 * dist_blocks_dim + 1) * (2 * dist_blocks_dim + 1) / 8;
plan = distribute(dist_blocks_n, 512, devs);
{ // align across CLUSTER_DISTANCES_BLOCK_SIZE horizontal boundaries
uint32_t align = 0;
for (auto& p : plan) {
uint32_t offset, length;
std::tie(offset, length) = p;
offset += align;
std::get<0>(p) = offset;
uint32_t n = dist_blocks_dim;
float tmp = n + 0.5;
float d = sqrt(tmp * tmp - 2 * (offset + length));
uint32_t y = tmp - d;
uint32_t x = offset + length + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
if (x > 0) {
align = n - y - x;
std::get<1>(p) += align;
}
}
}
if (verbosity > 1) {
print_plan("plan_calc_cluster_distances", plan);
}
INFO("calculating the centroid distance matrix...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(length, 1, 1);
KERNEL_SWITCH(knn_calc_cluster_distances, <<<grid, block>>>(
offset, reinterpret_cast<const F*>(centroids[devi].get()),
(*distances)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t y_start, y_finish;
{
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
float tmp = dist_blocks_dim + 0.5;
float d = sqrt(tmp * tmp - 2 * offset);
y_start = tmp - d;
d = sqrt(tmp * tmp - 2 * (offset + length));
y_finish = tmp - d;
}
if (y_finish == y_start) {
continue;
}
uint32_t p_offset = y_start * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t p_size = (y_finish - y_start) * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
p_size = std::min(p_size, h_clusters_size * h_clusters_size - p_offset);
FOR_OTHER_DEVS(
CUP2P(distances, p_offset, p_size);
);
);
FOR_EACH_DEVI(
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(dist_blocks_n, 1, 1);
knn_mirror_cluster_distances<<<grid, block>>>((*distances)[devi].get());
);
plan = distribute(h_samples_size, h_features_size * sizeof(float), devs);
INFO("searching for the nearest neighbors...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (knn_cuda_neighbors_mem_multiplier(k, devs[devi], 1) == 2) {
dim3 block(KNN_BLOCK_SIZE_GMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(knn_assign_gmem, <<<grid, block>>>(
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
knn_assign_gmem_deinterleave1<<<grid, block>>>(
length, k, (*neighbors)[devi].get());
dim3 grid2(upper(h_samples_size, 2 * block.x), 1, 1);
knn_assign_gmem_deinterleave2<<<grid2, block>>>(
length, k, (*neighbors)[devi].get());
} else {
dim3 block(KNN_BLOCK_SIZE_SHMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(
knn_assign_shmem,
<<<grid, block, KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t)>>>(
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
}
);
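// collect the per-device counters of pairwise distances that were actually evaluated
// and report them as a fraction of the full N * N distance matrix, presumably to show
// how much work the cluster-radius / cluster-distance pruning above saved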
uint64_t dists_calced = 0;
FOR_EACH_DEV(
uint64_t h_dists_calced = 0;
CUCH(cudaMemcpyFromSymbol(&h_dists_calced, d_dists_calced, sizeof(h_dists_calced)),
kmcudaMemoryCopyError);
DEBUG("#%d dists_calced: %" PRIu64 "\n", dev, h_dists_calced);
dists_calced += h_dists_calced;
);
uint64_t max_dists_calced = static_cast<uint64_t>(h_samples_size) * h_samples_size;
INFO("calculated %f of all the distances\n", (dists_calced + .0) / max_dists_calced);
return kmcudaSuccess;
}
} // extern "C"
|
69fa34cdf01c4e09be2f571c9d9764603e3cee32.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/indexreduce.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <loops/broadcasting_bool.h>
#include <loops/scalar.h>
#include <loops/scalar_bool.h>
#include <loops/pairwise_transform.h>
#include <loops/pairwise_bool.h>
#include <loops/transform_same.h>
#include <loops/transform_float.h>
#include <loops/transform_strict.h>
#include <loops/transform_bool.h>
#include <loops/transform_any.h>
#include <loops/reduce_float.h>
#include <loops/reduce_same.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
//#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
#include <helpers/DebugHelper.h>
#include <graph/exceptions/datatype_exception.h>
#include <helpers/CudaLaunchHelper.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
#include <PointersManager.h>
//#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <Status.h>
#include <helpers/DebugHelper.h>
using namespace nd4j;
#include <loops/special_kernels.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
/**
 * This is a utility kernel that fills the given special buffer with proper values in device memory
*/
extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) {
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0)
return;
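// what follows fills a rank-2, 'c'-ordered shapeInfo describing a {rows, 1} column
// vector, assuming the usual nd4j layout:
// [rank, shape[0], shape[1], stride[0], stride[1], extras, elementWiseStride, order]
// (99 is the ASCII code of the order character 'c')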
dimension[0] = 0;
maxDimension[0] = 1;
specialPointer[0] = 2;
specialPointer[1] = rows;
specialPointer[2] = 1;
specialPointer[3] = 1;
specialPointer[4] = 1;
specialPointer[5] = 0;
specialPointer[6] = 1;
specialPointer[7] = 99;
ArrayOptions::setDataType(specialPointer, dataType);
//printf("special[0]: [%lld]\n", (long long) specialPointer[0]);
//shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer);
}
// this method isn't used; it is left here for legacy and cautionary purposes
// TL;DR: don't use this approach, it performs poorly
void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){
SyncInfo *sync = reinterpret_cast<SyncInfo *>(data);
//printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jLong n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense in launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// clamp the block count to the configured limit
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
/*
 * This method returns the shared memory threshold value. The default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads down to a multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory in some cases, we need to account for that
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this point we've gathered all the required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
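// shrink the block one warp at a time until the per-block reduction buffer
// (num_threads * elementSize * reduction bytes) plus the base memory fits into the
// desired shared memory budget; once num_threads hits minThreads the overshoot is accepted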
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this moment we know total memory used per block, and we also know per-mp limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the GPU can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks w.r.t. shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
/*
 * This method returns kernel launch params for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, hipFuncAttributes funcAttr) {
auto xRank = shape::rank(dXShapeInfo);
auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo);
auto zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
auto xLength = shape::length(dXShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TAD here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks w.r.t. shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param dXShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
Nd4jLong tadLength = 0;
Nd4jLong numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(dXShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768);
numTads = shape::length(dXShapeInfo) / tadLength;
}
auto xRank = shape::rank(dXShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, hipFuncAttributes attributes, hipDeviceProp_t properties) {
auto n = shape::length(hXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) {
Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
void NativeOps::execPairwiseTransform(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 1024, 8192);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransform both operands must have same data type", xType, yType);
if (xType != zType && yType != zType)
throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand to have either X or Y type");
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES)
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES)
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execPairwiseTransformBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType);
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 1024, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execBroadcastBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type");
if (yType != xType)
throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type");
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES)
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
/*
hipEvent_t start;
hipEventCreateWithFlags(&start, hipEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("FF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF8 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduce(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
void NativeOps::execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// void *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// FIXME: we want Z to be one of integer types
//if (!DataTypeUtils::isZ(zType))
// throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types")
if (zType != nd4j::DataType::INT64)
throw nd4j::datatype_exception::build("NativeOps::execIndexReduceScalar requires Z operand to have INT64 data type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed");
}
void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType)
throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type");
//nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) failed");
}
void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execTransformBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
}
void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
switch (opNum) {
case transform::IsMax: {
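// Two paths here: with extraParams == nullptr the whole input is treated as a flat
// vector, so IsMax becomes a single IndexMax reduction plus a filler kernel that marks
// the winning element; otherwise the argmax is taken along the dimensions passed via
// extraPointers[15]/[18] and a dimensional filler writes the per-TAD results.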
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
auto special = reinterpret_cast<double *>(extraPointers[17]);
if (scalarCheat) {
auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64);
/**
 * In the case of vector input for IsMax, it just turns into an IndexReduce call plus a further filler call
*/
execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr);
Nd4jLong maxIdx = -119;
checkCudaErrors(hipStreamSynchronize(*stream));
hipMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), hipMemcpyDeviceToHost, *stream);
checkCudaErrors(hipStreamSynchronize(*stream));
int targetIdx = 0;
if (shape::order(hXShapeInfo) == 'c' || shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1];
dim3 launchDims(1, 512, 1024);
BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
delete[] scalarShape;
} else {
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]);
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
int dimensionLength = getDeviceId(extraPointers[18]);
auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, dimensionLength);
// we call for IMax on specified dimension
execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, nullptr, cshape, dimension, nullptr);
DEBUG_KERNEL(stream, opNum);
dim3 launchDims(256, 256, 16384);
// at this point, all IMax indexes are gathered, and we execute filler
BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
delete[] cshape;
}
}
break;
default: {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES);
}
}
}
void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType || !DataTypeUtils::isR(xType))
throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType);
switch (opNum) {
case transform::SoftMax:
case transform::SoftMaxDerivative:
case transform::LogSoftMax: {
if (shape::isVector(hXShapeInfo)) {
int length = shape::length(hXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
auto reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, reductionPointer, nullptr, nullptr), FLOAT_TYPES);
} else {
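// Non-vector path: the calls below implement the numerically stable softmax
// decomposition z = exp(x - max_row(x)) / sum_row(exp(x - max_row(x))) as a chain of
// legacy ops (Max reduce, Subtract broadcast, Exp transform, Sum reduce, Divide
// broadcast) over a temporary {rows, 1} shape buffer prepared by prepareShapeBuffer;
// LogSoftMax and SoftMaxDerivative then apply Log / SpecialDerivative on the result.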
auto shape = shape::shapeOf(hXShapeInfo);
auto llocPointer = reinterpret_cast<int *>(extraPointers[3]);
auto reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// special pointer for special buffer for special ops
auto specialPointer = reinterpret_cast<double *>(extraPointers[6]);
auto dimension = reinterpret_cast<int *>(specialPointer);
auto maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4));
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = nd4j::ShapeBuilders::createShapeInfo(xType, 'c', 2, maxShape);
auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, 1);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0], xType);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer,
nullptr, cshape, maxDimension, nullptr);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, cshape, dimension, nullptr);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformStrict(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer,
nullptr, cshape, maxDimension, nullptr);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo,
nullptr, cshape, dimension, nullptr);
DEBUG_KERNEL(stream, opNum);
// log 3
if (opNum == transform::LogSoftMax)
execTransformStrict(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
else if (opNum == transform::SoftMaxDerivative)
execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) failed");
delete hostMaxShapeBuffer;
delete[] cshape;
}
}
break;
default: {
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES);
}
}
}
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType);
if (opNum == transform::Histogram) {
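// Histogram needs a scratch area of length * gridDim.x int64 slots, where length is
// presumably the number of output bins; try to allocate it on the device first and
// fall back to pinned host memory if that fails, releasing it after the kernel completes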
dim3 launchDims(256, 256, 32768);
Nd4jPointer maskedAllocPointer;
auto length = shape::length(hZShapeInfo);
bool onDevice = true;
auto res = hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64));
if (res != 0) {
onDevice = false;
hipHostMalloc(&maskedAllocPointer, length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64), hipHostMallocDefault);
}
auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
checkCudaErrors(hipStreamSynchronize(*stream));
if (onDevice)
hipFree(maskedAllocPointer);
else
hipHostFree(maskedAllocPointer);
} else {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param dZ the dZ array
* @param dZShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flatten(Nd4jPointer *extraPointers,
int offset,
char order,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hInput, Nd4jLong *hInputShapeInfo,
void *dInput, Nd4jLong *dInputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
// int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo);
BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::checkP2P() {
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
hipSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(dY, 0);
} else {
hipDeviceDisablePeerAccess(dY);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
hipSetDevice(curDevice);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->hipblasSgemv = (CublasSgemv)functions[0];
this->hipblasDgemv = (CublasDgemv)functions[1];
this->hipblasHgemm = (CublasHgemm)functions[2];
this->hipblasSgemm = (CublasSgemm)functions[3];
this->hipblasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
hipError_t res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize, hipHostMallocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
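// Illustrative usage sketch (not part of the API surface; assumes a NativeOps instance named `ops`):
//
//   NativeOps ops;
//   Nd4jPointer hostBuf = ops.mallocHost(1024, 0);   // request 1 KB of pinned host memory
//   if (hostBuf == 0L) {
//       // allocation failed, handle the error
//   } else {
//       ops.freeHost(hostBuf);                       // release the pinned buffer when done
//   }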
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
Nd4jPointer pointer;
auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, int deviceId) {
hipError_t res = hipFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipStreamCreate(...) failed");
return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer");
hipError_t dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipEventCreateWithFlags(...) failed");
return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t dZ = hipEventRecord(*pEvent, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipEventRecord(...) failed");
return 1;
}
int NativeOps::setDevice(int deviceId) {
auto dZ = hipSetDevice(deviceId);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipSetDevice(...) failed");
return 1;
}
Nd4jLong NativeOps::getDeviceFreeMemory() {
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceFreeMemory(int device) {
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceTotalMemory(int device) {
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, 0);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
// unknown direction: bail out instead of copying with an uninitialized kind
return 0;
}
}
hipError_t dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
if (dZ != 0) {
checkCudaErrors(dZ);
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
throw std::runtime_error("hipMemcpyAsync(...) failed");
//return 0L;
}
return 1;
}
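// Illustrative usage sketch for the flag values above (assumes `ops`, a pinned host buffer `hBuf`
// from mallocHost and a device buffer `dBuf` from mallocDevice, both of at least `size` bytes):
//
//   Nd4jPointer stream = ops.createStream();
//   ops.memcpyAsync(dBuf, hBuf, size, 1 /* host -> device */, stream);
//   ops.memcpyAsync(hBuf, dBuf, size, 2 /* device -> host */, stream);
//   ops.streamSynchronize(stream);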
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipError_t dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipMemset(...) failed");
return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipError_t dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipMemsetAsync(...) failed");
return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t dZ = hipEventDestroy(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEvenDestroy(...) failed");
return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t dZ = hipStreamSynchronize(*pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipStreamSynchronize(...) failed");
return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t dZ = hipEventSynchronize(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipEventSynchronize(...) failed");
return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(int device) {
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(int device) {
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(int device) {
return deviceProperties[device].name;
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void concatCuda(const int numOfArrs, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {
__shared__ int arrIdx, blocksPerArr;
__shared__ T *x, *z;
__shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLen, arrLenZ, arrLenPerBlock, start, end;
if (threadIdx.x == 0) {
blocksPerArr = (gridDim.x - gridDim.x % numOfArrs) / numOfArrs; // floor
arrIdx = blockIdx.x / blocksPerArr;
if (arrIdx >= numOfArrs)
arrIdx = numOfArrs - 1;
x = reinterpret_cast<T*>(reinterpret_cast<void**>(pVx)[arrIdx]);
z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[arrIdx]);
xShapeInfo = reinterpret_cast<Nd4jLong**>(pxShapeInfo)[arrIdx];
zShapeInfo = reinterpret_cast<Nd4jLong**>(pzShapeInfo)[arrIdx];
arrLen = shape::length(xShapeInfo);
arrLenZ = shape::length(zShapeInfo);
arrLenPerBlock = (arrLen + blocksPerArr - arrLen % blocksPerArr) / blocksPerArr; // ceil
start = arrLenPerBlock * (blockIdx.x % blocksPerArr);
end = (start + arrLenPerBlock) > arrLen ? arrLen : (start + arrLenPerBlock);
}
__syncthreads();
for (Nd4jLong i = threadIdx.x + start; i < end; i += blockDim.x) {
auto zOffset = shape::getIndexOffset(i, zShapeInfo, arrLenZ);
auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLen);
//printf("z[%i][%lld] = x[%i][%lld]\n", arrIdx, zOffset, arrIdx, xOffset);
z[zOffset] = x[xOffset];
}
}
template<typename T>
__host__ static void concatCudaLauncher(const int numOfArrs, hipStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {
//int blocks = numOfArrs * 16; // >> 1 << 2);
//nd4j_printf("gridDim.x is %i\n", blocks);
//if (blocks > 8192)
// blocks = 8192; // restrict grid dims to 8K max
hipLaunchKernelGGL(( concatCuda<T>), dim3(numOfArrs), dim3(128), 512, *stream, numOfArrs, pVx, pxShapeInfo, pVz, pzShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "concat(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int numOfArrs, hipStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo), LIBND4J_TYPES);
static void
specialBufferAndShapeWithOffset(void* vZ, Nd4jLong* hZShapeInfo, Nd4jLong* dZShapeInfo, std::vector<Nd4jLong> const& idx, void*& outBuffer, Nd4jLong*& outShape) {
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
const int rank = shape::rank(hZShapeInfo);
Nd4jLong* newShape = new Nd4jLong[shape::shapeInfoLength(rank)];
//ALLOCATE(newShape, nullptr, , Nd4jLong)
auto shapeSize = shape::shapeInfoByteLength(rank);
memcpy(newShape, hZShapeInfo, shapeSize);
auto shapeOf = shape::shapeOf(newShape);
auto stridesOf = shape::stride(newShape);
Nd4jLong offset(0), subArrLen(1);
int n(2), first, last, stride;
for (int d = rank - 1; d >= 0; --d) {
if (idx[n * d] != idx[n * d + 1]) {
auto axeDim = shape::sizeAt(hZShapeInfo, d);
first = idx[n * d] >= 0 ? idx[n * d] : idx[n * d] + axeDim + 1;
last = idx[n * d + 1] >= 0 ? idx[n * d + 1] : idx[n * d + 1] + axeDim + 1;
stride = 1;
shapeOf[d] = (last - first + stride - 1) / stride; // ceil (last - first) / stride;
offset += first * stridesOf[d];
if(shapeOf[d] != 1)
stridesOf[d] *= stride;
}
subArrLen *= shapeOf[d];
}
// check if there is possibility to set ews = 1
shape::setEws(newShape, subArrLen);
//makeBothBuffersActual();
outBuffer = (void*)((int8_t*)vZ + offset * DataTypeUtils::sizeOfElement(zType));
hipError_t err = hipMalloc(&outShape, shapeSize);
if (err != 0) {
printf("Cannot allocate memory with error %d\n", err);
throw std::runtime_error("Cannot allocate memory for shape");
}
hipMemcpy(outShape, newShape, shapeSize, hipMemcpyHostToDevice);
delete [] newShape;
}
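// Worked example (illustrative): concatenating two {2, 3} arrays along axis 1 into a c-ordered
// {2, 6} output with strides {6, 1}. For the second input the index pair along axis 1 is [3, 6),
// so first = 3, the sub-array shape becomes {2, 3} and offset = 3 * stride(axis 1) = 3 elements;
// the returned buffer therefore points 3 elements past the start of vZ and the returned device
// shape describes that {2, 3} strided view of the output.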
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data, Nd4jPointer *inputShapeInfo,
Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hXShapeInfo = hZShapeInfo;
auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo);
auto dShapePointers = reinterpret_cast<Nd4jLong **>(dinputShapeInfo);
// numArrays will be used as number of TADs, so each block processes 1 input
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto axis = dimension;
const int rank = shape::rank(reinterpret_cast<Nd4jLong*>(inputShapeInfo[0]));
const int rank2 = 2 * rank;
std::vector<std::vector<Nd4jLong>> indices(numArrays, std::vector<Nd4jLong>(rank2 == 0?2:rank2,0));
// take into account indices for first array
auto axisSize = shape::sizeAt(reinterpret_cast<Nd4jLong*>(inputShapeInfo[0]), axis);
// nd4j_printf("Set up indices...", "");
// nd4j_printf("\n\n\tElement 0 at %i is setting\n", 2 * axis + 1);
indices[0][2 * axis + 1] = axisSize;
// nd4j_printf("\n\n\tElement 0 at %i was set\n", 2 * axis + 1);
// loop through the rest of input arrays
for(int i = 1; i < numArrays; ++i) {
// nd4j_printf("\tIteration %i:\n", i);
indices[i][2 * axis] = indices[i - 1][2 * axis + 1]; // index start from
// nd4j_printf("\n\n\tindices[%i][%i] was set\n", i, 2 * axis);
indices[i][2 * axis + 1] = indices[i - 1][2 * axis + 1] + shape::sizeAt(reinterpret_cast<Nd4jLong*>(inputShapeInfo[i]), axis); // index end with (excluding)
// nd4j_printf("\tindices[%i][%i] was set\n", i, 2 * axis + 1);
}
// nd4j_printf(" done\n", "");
// nd4j_printf("Pack output shapes and buffers...", "");
std::vector<void*> outSubArrsBuffs(numArrays);
std::vector<Nd4jLong*> outSubArrsShapes(numArrays);
for(int i = 0; i < numArrays; ++i) {
specialBufferAndShapeWithOffset(dZ, hZShapeInfo, dZShapeInfo, indices[i], outSubArrsBuffs[i], outSubArrsShapes[i]);
}
// nd4j_printf(" done\n", "");
// nd4j_printf("Prepare device pointers...", "");
// prepare arrays of pointers on buffers and shapes
std::vector<void*> hOutBuffers(numArrays), hInBuffers(numArrays);
std::vector<Nd4jLong*> hOutShapeInfo(numArrays), hInShapeInfo(numArrays);
for(int i = 0; i < numArrays; ++i) {
hOutBuffers[i] = outSubArrsBuffs[i];
hInBuffers[i] = ddata[i];//->getSpecialBuffer();
hOutShapeInfo[i] = outSubArrsShapes[i];
hInShapeInfo[i] = (Nd4jLong*)(dShapePointers[i]);//->getSpecialShapeInfo();
// nd4j_printf("X_%i shape ptr: %p; data ptr: %p;\n", i, hInShapeInfo[i], hInBuffers[i]);
}
// nd4j_printf(" done\n", "");
LaunchContext context(stream);
// allocate and copy all buffers and shapes arrays to global memory
PointersManager manager(&context, "NativeOps::concat");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*));
void* dOutShapeInfo = manager.replicatePointer(hOutShapeInfo.data(), hOutShapeInfo.size() * sizeof(Nd4jLong*));
// nd4j_printf("Concat itself run...", "");
BUILD_SINGLE_SELECTOR(zType, concatCudaLauncher, (numArrays, stream, dInBuffers, dInShapeInfo, dOutBuffers, dOutShapeInfo), LIBND4J_TYPES);
manager.synchronize();
// nd4j_printf(" done\n", "");
// nd4j_printf("Postprocessing...", "");
// hipError_t res = hipStreamSynchronize(*stream);
// checkCudaErrors(res);
// nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
// nd4j_printf(" done\n", "");
// nd4j_printf("Free up rest...", "");
hipError_t err;
for(int i = 0; i < numArrays; ++i) {
err = hipFree(outSubArrsShapes[i]);
if (err != 0) {
printf("Error %d occured when shape %i was deallocating.\n", err, i);
throw std::runtime_error("Cannot deallocate memory for shapes.");
}
}
// nd4j_printf(" done\n", "");
// nd4j_printf("All done!!!\n", "");
}
void NativeOps::specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
dZ,
dZShapeInfo);
}
/**
* This method computes TAD-only shape info and offsets for the given dimensions and copies them into the target/offsets buffers
*/
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
//nd4j_printf("START ------->\n","");
//nd4j_printf("Shape pointer: [%p]\n", dXShapeInfo);
//nd4j_printf("Dimension pointer: [%p]\n", dimension);
//nd4j_printf("shape rank: [%i]; dimLength: [%i]\n", shape::rank(dXShapeInfo), dimensionLength);
//shape::printShapeInfoLinear(dXShapeInfo);
//fflush(stdout);
//shape::printArray<int>(reinterpret_cast<void*>(dimension), dimensionLength, "dimensions");
//fflush(stdout);
//nd4j_printf("END ------->\n","");
//shape::TAD tad;
//tad.init(dXShapeInfo, dimension, dimensionLength);
//nd4j_printf("Creating TAD shape...\n","");
//tad.createTadOnlyShapeInfo();
//nd4j_printf("Creating TAD offsets...\n","");
//tad.createOffsets();
//nd4j_printf("memcpy TAD shape...\n","");
//std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
//nd4j_printf("memcpy TAD offsets...\n","");
//std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
//nd4j_printf("memcpy finished...\n","");
auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength);
std::memcpy(reinterpret_cast<void *>(target), tadPack.primaryShapeInfo(), shape::shapeInfoByteLength(tadPack.primaryShapeInfo()));
std::memcpy(reinterpret_cast<void *>(offsets), tadPack.primaryOffsets(), tadPack.numberOfTads() * sizeof(Nd4jLong));
}
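// Illustrative example: for a c-ordered 3x4 array and dimension = {1}, each TAD is one row of
// length 4, so `target` receives the shape info of a length-4 vector and `offsets` receives
// {0, 4, 8} - one offset per row (assuming an element-wise stride of 1).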
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
}
//hipError_t dZ = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
hipError_t dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("hipMemcpyToSymbolAsync(...) failed");
return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw std::runtime_error("hipGetSymbolAddress(...) failed");
return dConstAddr;
}
void NativeOps::pullRows(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
Nd4jLong *zTadShapeInfo,
Nd4jLong *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES);
}
}
void NativeOps::accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length) {
auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES);
}
}
void NativeOps::shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = nd4j::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
}
/*
void NativeOps::execMetaPredicateShape(Nd4jPointer *extras,
const int opTypeA,
const int opNumA,
const int opTypeB,
const int opNumB,
Nd4jLong N,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraA,
void *extraB,
double scalarA,
double scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES);
// functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
*/
bool NativeOps::isExperimentalEnabled() {
return nd4j::Environment::getInstance()->isExperimentalBuild();
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
hipGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
bool biasCorrected,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execScalarBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type");
if (!DataTypeUtils::isB(zType) )
throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires X & Y to have same type", xType, yType);
if (!DataTypeUtils::isB(zType) )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires Z operand to have BOOL type", nd4j::DataType::BOOL, zType);
BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
if (!Environment::getInstance()->isExperimentalBuild() && Environment::getInstance()->isDebug()) {
auto sX = DataTypeUtils::asString(xType);
auto sY = DataTypeUtils::asString(yType);
auto sZ = DataTypeUtils::asString(zType);
nd4j_printf("Running execScalar with dtypes: [%s], [%s], [%s]\n", sX.c_str(), sY.c_str(), sZ.c_str());
}
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
nd4j::DataType dtype) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}
void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, nd4j::DataType dtype) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
auto rng = reinterpret_cast<nd4j::graph::RandomGenerator *>(stateHost);
Nd4jPointer stateDevice;
bool onDevice = false;
dim3 launchDims(512, 512, 32768);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
if (res == 0) {
onDevice = true;
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));
} else {
hipHostMalloc(&stateDevice, sizeOf, hipHostMallocDefault);
std::memcpy(stateDevice, stateHost, sizeOf);
}
BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipStreamSynchronize(*stream));
if (onDevice)
hipFree(stateDevice);
else
hipHostFree(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
auto rng = reinterpret_cast<nd4j::graph::RandomGenerator *>(stateHost);
Nd4jPointer stateDevice;
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
bool onDevice = false;
auto res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
if (res == 0) {
onDevice = true;
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));
}else {
hipHostMalloc(&stateDevice, sizeOf, hipHostMallocDefault);
std::memcpy(stateDevice, stateHost, sizeOf);
}
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipStreamSynchronize(*stream));
if (onDevice)
hipFree(stateDevice);
else
hipHostFree(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
auto rng = reinterpret_cast<nd4j::graph::RandomGenerator *>(stateHost);
Nd4jPointer stateDevice;
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
bool onDevice = false;
auto res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
if (res == 0) {
onDevice = true;
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));
} else {
hipHostMalloc(&stateDevice, sizeOf, hipHostMallocDefault);
std::memcpy(stateDevice, stateHost, sizeOf);
}
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipStreamSynchronize(*stream));
if (onDevice)
hipFree(stateDevice);
else
hipHostFree(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// hipStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
// FIXME: it's a bad thing, but we can't know in advance which stream(s) were using this generator in practice
hipDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh buffer on host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
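// Example (assuming the usual libnd4j layout of rank, shape, strides plus trailing
// ews/order/extra fields): a rank-2 shape buffer yields shape::shapeInfoLength(2) = 2 * 2 + 4 = 8
// Nd4jLong values.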
/**
* Returns the pointer corresponding to the given numeric address
*
* @param address the address to convert to a pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tear(Nd4jPointer *extras,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong *zShapeInfo,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (nd4j::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = nd4j::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
nd4j::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
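// For reference, the exclusive prefix scan computed by the recursion above is equivalent to this
// host-side sketch (illustrative only, not used by the GPU path):
//
//   static void exclusiveScanHost(const int *in, int *out, int n) {
//       int running = 0;
//       for (int i = 0; i < n; i++) {
//           out[i] = running;   // each output gets the sum of all preceding inputs
//           running += in[i];
//       }
//   }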
void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}
void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 4096);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}
void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto zType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}
void NativeOps::execReduce3All(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParamsVals,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims(shape::length(hZShapeInfo), 256, 32768);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES);
}
}
} else {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
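// illustrative note: after the extra shift, max is twice the smallest power of two covering xLength
// (e.g. xLength = 1000 -> max = 2048), so the window loop below still reaches a window spanning the whole array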
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES);
n>>=1;
rev = 1;
} while(n > 1);
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed");
}
void NativeOps::sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
bool descending) {
// to be implemented
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims(tadPack.numberOfTads(), 1024, 33768);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}
void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers,
void *dx, Nd4jLong *hXShapeInfo,
Nd4jLong N,
int *dz,
float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");
Nd4jLong dZ = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return dZ;
}
void NativeOps::decodeBitmap(Nd4jPointer *extraPointers,
void *dx,
Nd4jLong N,
void *dz, Nd4jLong *zShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nd4j::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) {
nd4j::graph::VariableSpace varSpace;
Context block(2, &varSpace);
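// illustrative note: the ids used here are assumed to be arbitrary fake node ids - the block itself is
// registered as node 2, while each input is put into the VariableSpace under node 1 with index e (see loop below)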
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numBArgs; e++)
block.getBArguments()->push_back(bArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't copy the buffer if it's an empty array
void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
auto array = new nd4j::NDArray(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.workspace() != nullptr)
shapeList->detach();
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs);
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray*> inputs(numInputs);
std::vector<nd4j::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(numBArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
inputs[e] = new nd4j::NDArray(buffer, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
// FIXME: revisit this.
bool canNullify = true;
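// illustrative note: the aliasing check below skips zeroing when the output buffer is also used as an
// input buffer, so input data is not wiped before the op has a chance to read it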
for (int i = 0; i < numInputs; i++) {
void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
if (ibuffer == buffer) {
canNullify = false;
break;
}
}
if (canNullify)
memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
auto array = new nd4j::NDArray(buffer, shape);
outputs[e] = array;
// and we want to release shape copy once we're done
array->triggerAllocationFlag(false, true);
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
for (int e = 0; e < numBArgs; e++)
bbArgs[e] = bArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
/*
if (!isInplace) {
if (dZ->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray<T> tmp(buffer, shape);
if (tmp.lengthOf() != dZ->at(e)->lengthOf()) {
nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf());
return ND4J_STATUS_BAD_OUTPUT;
}
tmp.assign(dZ->at(e));
}
} else {
// if op is inplace, our ResultSet holds pointers
dZ->purge();
}
delete dZ;
*/
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
}
int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
auto context = reinterpret_cast<Context*>(opContext);
return op->execute(context);
}
int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
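// illustrative note on the overall flow: clone the stored VariableSpace, inject the caller-provided inputs
// into it (loop below), run the graph against the clone, then copy the requested outputs into the VariablesSet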
std::vector<nd4j::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/Index from the original graph. values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void NativeOps::deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSet(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer NativeOps::getGraphState(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState(id);
}
void NativeOps::deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on operation (i.e. while of if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has id of 0, and inputs are
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// after some bla-bla-bla we should have Graph and Node for current op
return Status::OK();
}
Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
void NativeOps::deleteResultWrapper(Nd4jPointer ptr) {
// just 0 room for compiler s@!t
auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr);
delete p;
}
int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ);
*/
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
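// illustrative note: the ladder below dispatches on the (srcType, dstType) pair to the matching
// TypeCast::convertGenericCuda<From, To> instantiation; the branches left commented out (float8, float24,
// threshold encoding) appear to be legacy paths that are not wired up here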
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: .... ^^^
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//nd4j::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
}
Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new nd4j::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<nd4j::utf8string*>(ptr));
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs,
void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const int* indexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
for (int e = 0; e < numOfSubArrs; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
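// illustrative note: each referenced sub-array is owned by exactly one block; when there are more sub-arrays
// than blocks, ownership wraps around modulo gridDim.x (e.g. with gridDim.x = 512, index 700 maps to block 188)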
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
template<typename T>
__host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) {
hipLaunchKernelGGL(( scatterUpdateCuda<T>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
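// illustrative note: the launch uses a fixed 512 x 256 grid and requests MAX_NUM_THREADS bytes of dynamic
// shared memory; each block then walks all sub-arrays and processes only the ones it owns (see kernel above)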
}
//////////////////////////////////////////////////////////////////////////
void NativeOps::scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs,
void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets,
void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets,
void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets,
void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets,
int* hIndexes, int* dIndexes) {
auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
nd4j::DataType type = ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(type, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIndexes), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
}
void NativeOps::inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) {
auto p = reinterpret_cast<nd4j::DebugInfo*>(debugInfo);
NDArray array(buffer, shapeInfo, nullptr);
nd4j::DebugHelper::retrieveDebugStatistics(p, &array);
}
void __global__ tryPointerKernel(void* p, int len) {
auto buf = reinterpret_cast<int8_t*>(p);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int b;
if (threadIdx.x == 0)
b = 0; // initialize the shared accumulator before any thread adds to it
__syncthreads();
if (tid < len)
atomicAdd(&b, buf[tid]);
__syncthreads();
if (threadIdx.x ==0 && blockIdx.x == 0)
printf("Pointer check complete: %i\n", b);
}
void NativeOps::tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) {
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(( tryPointerKernel), dim3(256), dim3(512), len+64, stream, p, len);
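// illustrative note: len + 64 bytes of dynamic shared memory are requested even though the kernel only touches
// a single __shared__ int; the launch mainly serves to confirm the pointer is dereferenceable on the device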
auto e = hipStreamSynchronize(stream);
if (e != 0)
throw std::runtime_error("tryPointer failed");
hipStreamDestroy(stream);
}
|
69fa34cdf01c4e09be2f571c9d9764603e3cee32.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/indexreduce.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <loops/broadcasting_bool.h>
#include <loops/scalar.h>
#include <loops/scalar_bool.h>
#include <loops/pairwise_transform.h>
#include <loops/pairwise_bool.h>
#include <loops/transform_same.h>
#include <loops/transform_float.h>
#include <loops/transform_strict.h>
#include <loops/transform_bool.h>
#include <loops/transform_any.h>
#include <loops/reduce_float.h>
#include <loops/reduce_same.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
//#include <thread>
#include <map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
#include <helpers/DebugHelper.h>
#include <graph/exceptions/datatype_exception.h>
#include <helpers/CudaLaunchHelper.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
#include <PointersManager.h>
//#include <sys/time.h>
#include <curand.h>
#include <Status.h>
#include <helpers/DebugHelper.h>
using namespace nd4j;
#include <loops/special_kernels.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
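// illustrative note: 49152 bytes = 48 KB, which fits comfortably within the 64 KB of __constant__ memory
// available per CUDA device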
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
/**
 * This is a utility kernel that updates the given special buffer with proper values in device memory
*/
extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) {
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0)
return;
dimension[0] = 0;
maxDimension[0] = 1;
specialPointer[0] = 2;
specialPointer[1] = rows;
specialPointer[2] = 1;
specialPointer[3] = 1;
specialPointer[4] = 1;
specialPointer[5] = 0;
specialPointer[6] = 1;
specialPointer[7] = 99;
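// illustrative note: the writes above lay out a rank-2 shapeInfo of shape {rows, 1} with unit strides,
// element-wise stride 1 and order 99 ('c'), following the usual [rank, shape..., stride..., 0, ews, order] layout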
ArrayOptions::setDataType(specialPointer, dataType);
//printf("special[0]: [%lld]\n", (long long) specialPointer[0]);
//shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer);
}
// this method isn't used, left here for legacy and caution purposes
// TLDR: don't use this way, it sucks
void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){
SyncInfo *sync = reinterpret_cast<SyncInfo *>(data);
//printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jLong n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
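// illustrative walk-through (assuming the occupancy query allows it and the defaults maxThreads = 512,
// blockLimit = 128): for n = 100000 this yields num_threads = 512 and 100000 / 512 = 195 requested blocks,
// which the blockLimit cap reduces to 128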
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
/*
 * This method returns the shared memory threshold value. default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
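// illustrative note: dividing by the 0.3 overflow ratio inflates the threshold, e.g. 49152 / 0.3 = 163840,
// which callers then treat as a per-multiprocessor shared-memory budget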
return shmemThreshold / 0.3;
}
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads to nearest warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
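// illustrative note: desiredShared spreads that per-MP budget over the blocks expected to be resident on one MP,
// e.g. a 163840-byte threshold with 8 blocks per MP leaves roughly 20480 bytes per block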
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this moment we've stored all required information for things. time to count in reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this moment we know total memory used per block, and we also know per-mp limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the gpu can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory. So, now we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
/*
* This method returns kernel launch param for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, cudaFuncAttributes funcAttr) {
auto xRank = shape::rank(dXShapeInfo);
auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo);
auto zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
auto xLength = shape::length(dXShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TAD here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
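// illustrative note: rounding down to a multiple of the warp size (32) avoids partially filled warps,
// e.g. 250 threads -> 224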
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory. So, now we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param dXShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
Nd4jLong tadLength = 0;
Nd4jLong numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(dXShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have a special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768);
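// illustrative note: a full reduction is modelled as TADs of at most 768 elements, so e.g. a 10000-element
// array is treated as 13 TADs of length 768 (integer division below)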
numTads = shape::length(dXShapeInfo) / tadLength;
}
auto xRank = shape::rank(dXShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, cudaFuncAttributes attributes, cudaDeviceProp properties) {
auto n = shape::length(hXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) {
Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
void NativeOps::execPairwiseTransform(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 1024, 8192);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransform both operands must have same data type", xType, yType);
if (xType != zType && yType != zType)
throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand to have either X or Y type");
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES)
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES)
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execPairwiseTransformBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType);
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 1024, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execBroadcastBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type");
if (yType != xType)
throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type");
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES)
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
/*
cudaEvent_t start;
cudaEventCreateWithFlags(&start, cudaEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("FF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
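// illustrative note: getReductionBlocks is assumed to derive the grid size from xLength and the 256-thread
// block width above, roughly ceil(xLength / blockWidth), possibly with an internal cap, before launchDims
// below requests 32 KB of shared memory per block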
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF8 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("SF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
if (zType != xType)
throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("LF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("BF7 opNum:[%i]\n", opNum);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (zType != nd4j::DataType::BOOL)
throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduce(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (zType != nd4j::DataType::INT64)
throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xRank = shape::rank(hXShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
void NativeOps::execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// void *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
// FIXME: we want Z to be one of integer types
//if (!DataTypeUtils::isZ(zType))
// throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types")
if (zType != nd4j::DataType::INT64)
throw nd4j::datatype_exception::build("NativeOps::execIndexReduceScalar requires Z operand to have INT64 data type", zType);
auto dz = reinterpret_cast<Nd4jLong*>(dZ);
BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed");
}
void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType)
throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type");
//nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) failed");
}
void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
}
void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
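// IsMax is handled as a special case below: the scalar (whole-array) variant runs an IndexMax
// index-reduction and then launches fillIsMaxGeneric to mark the winning position in the output,
// while the dimensional variant runs IndexMax along the requested dimensions and then launches
// fillDimensionalIsMaxGeneric. All other ops go through the generic TransformAny path.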
switch (opNum) {
case transform::IsMax: {
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
auto special = reinterpret_cast<double *>(extraPointers[17]);
if (scalarCheat) {
auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64);
/**
* In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
*/
execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr);
Nd4jLong maxIdx = -119;
checkCudaErrors(cudaStreamSynchronize(*stream));
cudaMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), cudaMemcpyDeviceToHost, *stream);
checkCudaErrors(cudaStreamSynchronize(*stream));
int targetIdx = 0;
if (shape::order(hXShapeInfo) == 'c' || (shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1];
dim3 launchDims(1, 512, 1024);
BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
delete[] scalarShape;
} else {
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]);
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
int dimensionLength = getDeviceId(extraPointers[18]);
auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, dimensionLength);
// we call for IMax on specified dimension
execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, nullptr, cshape, dimension, nullptr);
DEBUG_KERNEL(stream, opNum);
dim3 launchDims(256, 256, 16384);
// at this point, all IMax indexes are gathered, and we execute filler
BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
delete[] cshape;
}
}
break;
default: {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES);
}
}
}
void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (xType != zType || !DataTypeUtils::isR(xType))
throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType);
switch (opNum) {
case transform::SoftMax:
case transform::SoftMaxDerivative:
case transform::LogSoftMax: {
if (shape::isVector(hXShapeInfo)) {
int length = shape::length(hXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
auto reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, reductionPointer, nullptr, nullptr), FLOAT_TYPES);
} else {
auto shape = shape::shapeOf(hXShapeInfo);
auto llocPointer = reinterpret_cast<int *>(extraPointers[3]);
auto reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// special pointer for special buffer for special ops
auto specialPointer = reinterpret_cast<double *>(extraPointers[6]);
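// The preallocated device scratch buffer (extraPointers[6]) is carved up in place: two ints
// (dimension and maxDimension), then a shape-info buffer of MAX_RANK * 2 + 4 Nd4jLongs, then the
// temporary 'special' buffer used as the output of the intermediate Max/Sum reductions.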
auto dimension = reinterpret_cast<int *>(specialPointer);
auto maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4));
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = nd4j::ShapeBuilders::createShapeInfo(xType, 'c', 2, maxShape);
auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, 1);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer<<<1, 1, 128, *stream>>>(dimension, maxDimension, maxShapeBuffer, shape[0], xType);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer,
nullptr, cshape, maxDimension, nullptr);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, cshape, dimension, nullptr);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformStrict(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer,
nullptr, cshape, maxDimension, nullptr);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo,
nullptr, cshape, dimension, nullptr);
DEBUG_KERNEL(stream, opNum);
// log 3
if (opNum == transform::LogSoftMax)
execTransformStrict(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
else if (opNum == transform::SoftMaxDerivative)
execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) failed");
delete hostMaxShapeBuffer;
delete[] cshape;
}
}
break;
default: {
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES);
}
}
}
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType);
if (opNum == transform::Histogram) {
dim3 launchDims(256, 256, 32768);
Nd4jPointer maskedAllocPointer;
auto length = shape::length(hZShapeInfo);
bool onDevice = true;
auto res = cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64));
if (res != 0) {
onDevice = false;
cudaHostAlloc(&maskedAllocPointer, length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64), cudaHostAllocDefault);
}
auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
checkCudaErrors(cudaStreamSynchronize(*stream));
if (onDevice)
cudaFree(maskedAllocPointer);
else
cudaFreeHost(maskedAllocPointer);
} else {
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param dZ the dZ array
* @param dZShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flatten(Nd4jPointer *extraPointers,
int offset,
char order,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hInput, Nd4jLong *hInputShapeInfo,
void *dInput, Nd4jLong *dInputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
// int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo);
BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
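// Probes peer-to-peer accessibility between every pair of visible devices and caches the result
// in supportedP2P. With a single device, P2P is reported as supported since all data lives on
// that device anyway.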
void NativeOps::checkP2P() {
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 && curDevice > devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
cudaSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 && curDevice > devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(dY, 0);
} else {
cudaDeviceDisablePeerAccess(dY);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
cudaSetDevice(curDevice);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->cublasSgemv = (CublasSgemv)functions[0];
this->cublasDgemv = (CublasDgemv)functions[1];
this->cublasHgemm = (CublasHgemm)functions[2];
this->cublasSgemm = (CublasSgemm)functions[3];
this->cublasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires a memory chunk of the requested size on the host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
cudaError_t res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize, cudaHostAllocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires a memory chunk of the requested size on the specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
Nd4jPointer pointer;
auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, int deviceId) {
cudaError_t res = cudaFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
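// Minimal usage sketch for the host/device memory helpers above (illustrative only; error
// handling omitted). Both allocators return 0 on failure, so callers should check the result:
//
//   NativeOps ops;
//   int dev = ops.getDevice();
//   Nd4jPointer hostBuf = ops.mallocHost(1024, 0);
//   Nd4jPointer devBuf = ops.mallocDevice(1024, dev, 0);
//   if (hostBuf != 0 && devBuf != 0) {
//       // ... fill hostBuf, copy it to devBuf, run kernels ...
//   }
//   ops.freeDevice(devBuf, dev);
//   ops.freeHost(hostBuf);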
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaStreamCreate(...) failed");
return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer");
cudaError_t dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEventCreateWithFlags(...) failed");
return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t dZ = cudaEventRecord(*pEvent, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEventRecord(...) failed");
return 1;
}
int NativeOps::setDevice(int deviceId) {
auto dZ = cudaSetDevice(deviceId);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaSetDevice(...) failed");
return 1;
}
Nd4jLong NativeOps::getDeviceFreeMemory() {
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceFreeMemory(int device) {
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceTotalMemory(int device) {
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, 0);
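// The 'flags' argument encodes the copy direction: 0 = host->host, 1 = host->device,
// 2 = device->host, 3 = device->device.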
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY direction: [%i]\n", flags);
throw std::runtime_error("NativeOps::memcpyAsync: unsupported copy direction (flags)");
}
}
cudaError_t dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
if (dZ != 0) {
checkCudaErrors(dZ);
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
throw std::runtime_error("cudaMemcpyAsync(...) failed");
//return 0L;
}
return 1;
}
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaError_t dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaMemset(...) failed");
return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaError_t dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaMemsetAsync(...) failed");
return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t dZ = cudaEventDestroy(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEvenDestroy(...) failed");
return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t dZ = cudaStreamSynchronize(*pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaStreamSynchronize(...) failed");
return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t dZ = cudaEventSynchronize(*pEvent);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaEventSynchronize(...) failed");
return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(int device) {
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(int device) {
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(int device) {
return deviceProperties[device].name;
}
///////////////////////////////////////////////////////////////////
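// concatCuda: each input array is assigned an equal share of the grid (blocksPerArr blocks);
// every block copies a contiguous chunk of its input into the matching pre-offset sub-array view
// of the output, using shape::getIndexOffset on both sides so arbitrary orders and strides are
// handled.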
template<typename T>
__global__ static void concatCuda(const int numOfArrs, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {
__shared__ int arrIdx, blocksPerArr;
__shared__ T *x, *z;
__shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLen, arrLenZ, arrLenPerBlock, start, end;
if (threadIdx.x == 0) {
blocksPerArr = (gridDim.x - gridDim.x % numOfArrs) / numOfArrs; // floor
arrIdx = blockIdx.x / blocksPerArr;
if (arrIdx >= numOfArrs)
arrIdx = numOfArrs - 1;
x = reinterpret_cast<T*>(reinterpret_cast<void**>(pVx)[arrIdx]);
z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[arrIdx]);
xShapeInfo = reinterpret_cast<Nd4jLong**>(pxShapeInfo)[arrIdx];
zShapeInfo = reinterpret_cast<Nd4jLong**>(pzShapeInfo)[arrIdx];
arrLen = shape::length(xShapeInfo);
arrLenZ = shape::length(zShapeInfo);
arrLenPerBlock = (arrLen + blocksPerArr - arrLen % blocksPerArr) / blocksPerArr; // ceil
start = arrLenPerBlock * (blockIdx.x % blocksPerArr);
end = (start + arrLenPerBlock) > arrLen ? arrLen : (start + arrLenPerBlock);
}
__syncthreads();
for (Nd4jLong i = threadIdx.x + start; i < end; i += blockDim.x) {
auto zOffset = shape::getIndexOffset(i, zShapeInfo, arrLenZ);
auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLen);
//printf("z[%i][%lld] = x[%i][%lld]\n", arrIdx, zOffset, arrIdx, xOffset);
z[zOffset] = x[xOffset];
}
}
template<typename T>
__host__ static void concatCudaLauncher(const int numOfArrs, cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {
//int blocks = numOfArrs * 16; // >> 1 << 2);
//nd4j_printf("gridDim.x is %i\n", blocks);
//if (blocks > 8192)
// blocks = 8192; // restrict grid dims to 8K max
concatCuda<T><<<numOfArrs, 128, 512, *stream>>>(numOfArrs, pVx, pxShapeInfo, pVz, pzShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "concat(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int numOfArrs, cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo), LIBND4J_TYPES);
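// Builds a sub-array "view" of the output for one index interval along the concat axis:
// outBuffer points into vZ at the computed element offset, and outShape is a freshly
// cudaMalloc'ed device copy of the adjusted shape info (the caller is responsible for freeing it).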
static void
specialBufferAndShapeWithOffset(void* vZ, Nd4jLong* hZShapeInfo, Nd4jLong* dZShapeInfo, std::vector<Nd4jLong> const& idx, void*& outBuffer, Nd4jLong*& outShape) {
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
const int rank = shape::rank(hZShapeInfo);
Nd4jLong* newShape = new Nd4jLong[shape::shapeInfoLength(rank)];
//ALLOCATE(newShape, nullptr, , Nd4jLong)
auto shapeSize = shape::shapeInfoByteLength(rank);
memcpy(newShape, hZShapeInfo, shapeSize);
auto shapeOf = shape::shapeOf(newShape);
auto stridesOf = shape::stride(newShape);
Nd4jLong offset(0), subArrLen(1);
int n(2), first, last, stride;
for (int d = rank - 1; d >= 0; --d) {
if (idx[n * d] != idx[n * d + 1]) {
auto axeDim = shape::sizeAt(hZShapeInfo, d);
first = idx[n * d] >= 0 ? idx[n * d] : idx[n * d] + axeDim + 1;
last = idx[n * d + 1] >= 0 ? idx[n * d + 1] : idx[n * d + 1] + axeDim + 1;
stride = 1;
shapeOf[d] = (last - first + stride - 1) / stride; // ceil (last - first) / stride;
offset += first * stridesOf[d];
if(shapeOf[d] != 1)
stridesOf[d] *= stride;
}
subArrLen *= shapeOf[d];
}
// check if there is possibility to set ews = 1
shape::setEws(newShape, subArrLen);
//makeBothBuffersActual();
outBuffer = (void*)((int8_t*)vZ + offset * DataTypeUtils::sizeOfElement(zType));
cudaError_t err = cudaMalloc(&outShape, shapeSize);
if (err != 0) {
printf("Cannot allocate memory with error %d\n", err);
throw std::runtime_error("Cannot allocate memory for shape");
}
cudaMemcpy(outShape, newShape, shapeSize, cudaMemcpyHostToDevice);
delete [] newShape;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
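// Implementation outline: compute the [first, last) interval each input occupies along the concat
// axis, build an output sub-array view for every interval via specialBufferAndShapeWithOffset,
// replicate the arrays of buffer/shape pointers to device memory through PointersManager, and
// launch concatCudaLauncher with one input array per block group.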
void NativeOps::concat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data, Nd4jPointer *inputShapeInfo,
Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hXShapeInfo = hZShapeInfo;
auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo);
auto dShapePointers = reinterpret_cast<Nd4jLong **>(dinputShapeInfo);
// numArrays will be used as number of TADs, so each block process 1 input
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto axis = dimension;
const int rank = shape::rank(reinterpret_cast<Nd4jLong*>(inputShapeInfo[0]));
const int rank2 = 2 * rank;
std::vector<std::vector<Nd4jLong>> indices(numArrays, std::vector<Nd4jLong>(rank2 == 0?2:rank2,0));
// take into account indices for first array
auto axisSize = shape::sizeAt(reinterpret_cast<Nd4jLong*>(inputShapeInfo[0]), axis);
// nd4j_printf("Set up indices...", "");
// nd4j_printf("\n\n\tElement 0 at %i is setting\n", 2 * axis + 1);
indices[0][2 * axis + 1] = axisSize;
// nd4j_printf("\n\n\tElement 0 at %i was set\n", 2 * axis + 1);
// loop through the rest of input arrays
for(int i = 1; i < numArrays; ++i) {
// nd4j_printf("\tIteration %i:\n", i);
indices[i][2 * axis] = indices[i - 1][2 * axis + 1]; // index start from
// nd4j_printf("\n\n\tindices[%i][%i] was set\n", i, 2 * axis);
indices[i][2 * axis + 1] = indices[i - 1][2 * axis + 1] + shape::sizeAt(reinterpret_cast<Nd4jLong*>(inputShapeInfo[i]), axis); // index end with (excluding)
// nd4j_printf("\tindices[%i][%i] was set\n", i, 2 * axis + 1);
}
// nd4j_printf(" done\n", "");
// nd4j_printf("Pack output shapes and buffers...", "");
std::vector<void*> outSubArrsBuffs(numArrays);
std::vector<Nd4jLong*> outSubArrsShapes(numArrays);
for(int i = 0; i < numArrays; ++i) {
specialBufferAndShapeWithOffset(dZ, hZShapeInfo, dZShapeInfo, indices[i], outSubArrsBuffs[i], outSubArrsShapes[i]);
}
// nd4j_printf(" done\n", "");
// nd4j_printf("Prepare device pointers...", "");
// prepare arrays of pointers on buffers and shapes
std::vector<void*> hOutBuffers(numArrays), hInBuffers(numArrays);
std::vector<Nd4jLong*> hOutShapeInfo(numArrays), hInShapeInfo(numArrays);
for(int i = 0; i < numArrays; ++i) {
hOutBuffers[i] = outSubArrsBuffs[i];
hInBuffers[i] = ddata[i];//->getSpecialBuffer();
hOutShapeInfo[i] = outSubArrsShapes[i];
hInShapeInfo[i] = (Nd4jLong*)(dShapePointers[i]);//->getSpecialShapeInfo();
// nd4j_printf("X_%i shape ptr: %p; data ptr: %p;\n", i, hInShapeInfo[i], hInBuffers[i]);
}
// nd4j_printf(" done\n", "");
LaunchContext context(stream);
// allocate and copy all buffers and shapes arrays to global memory
PointersManager manager(&context, "NativeOps::concat");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*));
void* dOutShapeInfo = manager.replicatePointer(hOutShapeInfo.data(), hOutShapeInfo.size() * sizeof(Nd4jLong*));
// nd4j_printf("Concat itself run...", "");
BUILD_SINGLE_SELECTOR(zType, concatCudaLauncher, (numArrays, stream, dInBuffers, dInShapeInfo, dOutBuffers, dOutShapeInfo), LIBND4J_TYPES);
manager.synchronize();
// nd4j_printf(" done\n", "");
// nd4j_printf("Postprocessing...", "");
// cudaError_t res = cudaStreamSynchronize(*stream);
// checkCudaErrors(res);
// nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
// nd4j_printf(" done\n", "");
// nd4j_printf("Free up rest...", "");
cudaError_t err;
for(int i = 0; i < numArrays; ++i) {
err = cudaFree(outSubArrsShapes[i]);
if (err != 0) {
printf("Error %d occured when shape %i was deallocating.\n", err, i);
throw std::runtime_error("Cannot deallocate memory for shapes.");
}
}
// nd4j_printf(" done\n", "");
// nd4j_printf("All done!!!\n", "");
}
void NativeOps::specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
dZ,
dZShapeInfo);
}
/**
* This method computes TAD-only shape info and offsets for the given dimensions
* and copies them into the provided target/offsets host buffers
*/
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
//nd4j_printf("START ------->\n","");
//nd4j_printf("Shape pointer: [%p]\n", dXShapeInfo);
//nd4j_printf("Dimension pointer: [%p]\n", dimension);
//nd4j_printf("shape rank: [%i]; dimLength: [%i]\n", shape::rank(dXShapeInfo), dimensionLength);
//shape::printShapeInfoLinear(dXShapeInfo);
//fflush(stdout);
//shape::printArray<int>(reinterpret_cast<void*>(dimension), dimensionLength, "dimensions");
//fflush(stdout);
//nd4j_printf("END ------->\n","");
//shape::TAD tad;
//tad.init(dXShapeInfo, dimension, dimensionLength);
//nd4j_printf("Creating TAD shape...\n","");
//tad.createTadOnlyShapeInfo();
//nd4j_printf("Creating TAD offsets...\n","");
//tad.createOffsets();
//nd4j_printf("memcpy TAD shape...\n","");
//std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
//nd4j_printf("memcpy TAD offsets...\n","");
//std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
//nd4j_printf("memcpy finished...\n","");
auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength);
std::memcpy(reinterpret_cast<void *>(target), tadPack.primaryShapeInfo(), shape::shapeInfoByteLength(tadPack.primaryShapeInfo()));
std::memcpy(reinterpret_cast<void *>(offsets), tadPack.primaryOffsets(), tadPack.numberOfTads() * sizeof(Nd4jLong));
}
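// Asynchronously copies 'size' bytes from 'src' into the device constant-memory symbol
// 'deviceConstantMemory' at byte offset 'dst'; 'flags' selects the cudaMemcpyKind with the same
// encoding as memcpyAsync above.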
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
}
//cudaError_t dZ = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
cudaError_t dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
checkCudaErrors(dZ);
if (dZ != 0)
throw std::runtime_error("cudaMemcpyToSymbolAsync(...) failed");
return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw std::runtime_error("cudaGetSymbolAddress(...) failed");
return dConstAddr;
}
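// pullRows gathers n TADs (rows) from dX into dZ according to 'indexes', using the precomputed
// TAD shape info and offsets for both the source and the destination.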
void NativeOps::pullRows(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
Nd4jLong *zTadShapeInfo,
Nd4jLong *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
}
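// average computes the mean of n input arrays of 'length' elements into the output buffer;
// mode 0 (taken from extras[3]) launches the CUDA averaging kernel, any other mode falls back to
// the host-side SpecialMethods implementation. The 'propagate' flag is forwarded unchanged to
// whichever implementation runs.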
void NativeOps::average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES);
}
}
void NativeOps::accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong *xShapeInfo,
Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *dz, Nd4jLong *dzShapeInfo,
int n,
Nd4jLong length) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES);
}
}
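// shuffle reorders the TADs of each of the N arrays according to the same shuffleMap, so several
// related arrays (e.g. features and labels) can be shuffled consistently with a single map;
// per-array shape info and TAD descriptors are forwarded to the kernel as-is.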
void NativeOps::shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = nd4j::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
}
/*
void NativeOps::execMetaPredicateShape(Nd4jPointer *extras,
const int opTypeA,
const int opNumA,
const int opTypeB,
const int opNumB,
Nd4jLong N,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraA,
void *extraB,
double scalarA,
double scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES);
// functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
*/
bool NativeOps::isExperimentalEnabled() {
return nd4j::Environment::getInstance()->isExperimentalBuild();
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
cudaGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
bool biasCorrected) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
bool biasCorrected,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
dim3 launchDims = dim3(256, 256, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims(256, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks, 256, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
}
////////////////////////////////////////////////////////////////////////
void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParams,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
if (xType != yType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType);
if (!DataTypeUtils::isR(zType))
throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}
void NativeOps::execScalarBool(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type");
if (!DataTypeUtils::isB(zType) )
throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarBool(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires X & Y to have same type", xType, yType);
if (!DataTypeUtils::isB(zType) )
throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires Z operand to have BOOL type", nd4j::DataType::BOOL, zType);
BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, BOOL_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalar(
Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalar, Nd4jLong *hScalarShapeInfo,
void *dScalar, Nd4jLong *dScalarShapeInfo,
void *extraParams) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(256, 512, 8192);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
if (!Environment::getInstance()->isExperimentalBuild() && Environment::getInstance()->isDebug()) {
auto sX = DataTypeUtils::asString(xType);
auto sY = DataTypeUtils::asString(yType);
auto sZ = DataTypeUtils::asString(zType);
nd4j_printf("Running execScalar with dtypes: [%s], [%s], [%s]\n", sX.c_str(), sY.c_str(), sZ.c_str());
}
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
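// Dispatch note: in non-experimental builds BUILD_SINGLE_SELECTOR_THRICE instantiates the
// scalar transform with xType in all three template slots, so X, the scalar and Z are
// expected to share one data type; __ND4J_EXPERIMENTAL__ enables the full pairwise matrix.
// Illustrative (hypothetical) float32 call, with opNum standing for the desired scalar op:
//   execScalar(extras, opNum, hX, hXShape, dX, dXShape, hZ, hZShape, dZ, dZShape,
//              hScalar, hScalarShape, dScalar, dScalarShape, nullptr /* extraParams */);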
void NativeOps::execScalar(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hScalars, Nd4jLong *hScalarShapeInfo,
void *dScalars, Nd4jLong *dScalarShapeInfo,
void *extraParams,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
nd4j::DataType dtype) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}
void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, nd4j::DataType dtype) {
// thread/shmem hints are read from extraPointers; the grid spans numAggregates
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
auto rng = reinterpret_cast<nd4j::graph::RandomGenerator *>(stateHost);
Nd4jPointer stateDevice;
bool onDevice = false;
dim3 launchDims(512, 512, 32768);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
auto res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
if (res == 0) {
onDevice = true;
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
} else {
cudaHostAlloc(&stateDevice, sizeOf, cudaHostAllocDefault);
std::memcpy(stateDevice, stateHost, sizeOf);
}
BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(cudaStreamSynchronize(*stream));
if (onDevice)
cudaFree(stateDevice);
else
cudaFreeHost(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
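// The RNG state handling above follows a simple fallback pattern: try to place the
// RandomGenerator state in device memory; if cudaMalloc fails, fall back to pinned host
// memory so the kernel can still read it. Minimal sketch of the same pattern
// (illustrative only, names are placeholders):
//   void *state = nullptr;
//   bool onDevice = (cudaMalloc(&state, stateSize) == cudaSuccess);
//   if (onDevice) cudaMemcpyAsync(state, hostState, stateSize, cudaMemcpyHostToDevice, stream);
//   else { cudaHostAlloc(&state, stateSize, cudaHostAllocDefault); std::memcpy(state, hostState, stateSize); }
//   /* launch kernel, then */ cudaStreamSynchronize(stream);
//   onDevice ? cudaFree(state) : cudaFreeHost(state);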
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
auto rng = reinterpret_cast<nd4j::graph::RandomGenerator *>(stateHost);
Nd4jPointer stateDevice;
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
bool onDevice = false;
auto res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
if (res == 0) {
onDevice = true;
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
}else {
cudaHostAlloc(&stateDevice, sizeOf, cudaHostAllocDefault);
std::memcpy(stateDevice, stateHost, sizeOf);
}
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(cudaStreamSynchronize(*stream));
if (onDevice)
cudaFree(stateDevice);
else
cudaFreeHost(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *extraArguments) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
auto rng = reinterpret_cast<nd4j::graph::RandomGenerator *>(stateHost);
Nd4jPointer stateDevice;
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);
bool onDevice = false;
auto res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
if (res == 0) {
onDevice = true;
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
} else {
cudaHostAlloc(&stateDevice, sizeOf, cudaHostAllocDefault);
std::memcpy(stateDevice, stateHost, sizeOf);
}
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(cudaStreamSynchronize(*stream));
if (onDevice)
cudaFree(stateDevice);
else
cudaFreeHost(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// cudaStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
// FIXME: this is not ideal, but we can't know in advance which stream(s) were using this generator in practice
cudaDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh the buffer on the host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
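// Example: in libnd4j a shape buffer typically stores the rank, the shape values, the
// stride values, plus a few trailing fields (element-wise stride, order, etc.), so for a
// rank-3 array shape::shapeInfoLength(3) evaluates to 2 * 3 + 4 = 10 Nd4jLong entries.
// (The exact trailing fields may differ between versions; the 2 * rank + 4 layout is the
// common case assumed here.)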
/**
* The pointer to get the address for
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tear(Nd4jPointer *extras,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong *zShapeInfo,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (nd4j::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = nd4j::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 large enough to scan it.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
nd4j::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
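// Worked example of the launch-size math above (illustrative): for numElements = 3000 and
// blockSize = 512, each block scans 2 * 512 = 1024 elements, so numBlocks = ceil(3000 / 1024) = 3
// and numThreads = 512. The last block only owns numEltsLastBlock = 3000 - 2 * 1024 = 952
// elements; since 952 is not a power of two, numThreadsLastBlock = floorPow2(952) = 512 and
// the non-power-of-two (np2) path handles that block separately.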
void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}
void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 4096);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}
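// The three encodeThreshold stages above form one compression pipeline (host-side sketch,
// buffer names are placeholders and the exact buffer wiring is up to the caller):
// P1 flags/counts the elements that pass the threshold, P2 runs the prefix scan over the
// per-block counts to turn them into write offsets, and P3 scatters the matching indices
// into the compressed output:
//   encodeThresholdP1(extras, dX, hXShapeInfo, N, encoded, threshold);
//   encodeThresholdP2Int(extras, encoded, numBlocks, offsets);
//   encodeThresholdP3(extras, dX, hXShapeInfo, offsets, N, encoded);
// decodeThreshold below reverses the process, expanding the compressed form back into dz.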
void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
dim3 launchDims(numBlocks, blockSize, 1024);
auto zType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}
void NativeOps::execReduce3All(Nd4jPointer *extraPointers,
int opNum,
void *hX, Nd4jLong *hXShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
void *extraParamsVals,
void *hY, Nd4jLong *hYShapeInfo,
void *dY, Nd4jLong *dYShapeInfo,
void *hZ, Nd4jLong *hZShapeInfo,
void *dZ, Nd4jLong *dZShapeInfo,
void *hDimension, Nd4jLong *hDimensionShape,
void *dDimension, Nd4jLong *dDimensionShape,
Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
auto dimension = reinterpret_cast<int *>(dDimension);
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims(shape::length(hZShapeInfo), 256, 32768);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
if (yType != xType)
throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[ 1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES);
}
}
} else {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES);
n>>=1;
rev = 1;
} while(n > 1);
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed");
}
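// The branch above picks the sorting strategy from the length: a length is a power of two
// exactly when xLength != 0 and (xLength & (xLength - 1)) == 0, e.g. 1024 -> true,
// 3000 -> false. Power-of-two lengths (up to roughly 10M elements) use the plain bitonic
// sort steps; everything else goes through bitonicArbitraryStepGeneric with the window
// padded up to the next power of two.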
void NativeOps::sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong *xShapeInfo,
void *dX, Nd4jLong *dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets,
bool descending) {
// sorts each TAD (sub-array along the given dimension) independently
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims(tadPack.numberOfTads(), 1024, 33768);
auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}
void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers,
void *dx, Nd4jLong *hXShapeInfo,
Nd4jLong N,
int *dz,
float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims(512, 512, 32768);
auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");
Nd4jLong dZ = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return dZ;
}
void NativeOps::decodeBitmap(Nd4jPointer *extraPointers,
void *dx,
Nd4jLong N,
void *dz, Nd4jLong *zShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims(512, 512, 16384);
auto xType = nd4j::ArrayOptions::dataType(zShapeInfo);
BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nd4j::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) {
nd4j::graph::VariableSpace varSpace;
Context block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numBArgs; e++)
block.getBArguments()->push_back(bArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't copy buffer if that's empty array
void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
auto array = new nd4j::NDArray(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.workspace() != nullptr)
shapeList->detach();
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs);
}
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
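// Typical (hypothetical) usage of the shape-inference entry points above from a client;
// opHash, the shape buffers and the argument counts are placeholders for a concrete op:
//   Nd4jPointer inShapes[] = { (Nd4jPointer) shapeInfo0, (Nd4jPointer) shapeInfo1 };
//   Nd4jLong iArgs[] = { 0 };
//   auto shapes = nativeOps->calculateOutputShapes(nullptr, opHash, inShapes, 2, nullptr, 0, iArgs, 1);
//   // read the resulting shape buffers (e.g. shapes->at(i)), then release them via
//   // deleteShapeList((Nd4jPointer) shapes);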
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray*> inputs(numInputs);
std::vector<nd4j::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(numBArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
inputs[e] = new nd4j::NDArray(buffer, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
// FIXME: revisit this.
bool canNullify = true;
for (int i = 0; i < numInputs; i++) {
void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
if (ibuffer == buffer) {
canNullify = false;
break;
}
}
if (canNullify)
memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
auto array = new nd4j::NDArray(buffer, shape);
outputs[e] = array;
// and we want to release shape copy once we're done
array->triggerAllocationFlag(false, true);
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
for (int e = 0; e < numBArgs; e++)
bbArgs[e] = bArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
/*
if (!isInplace) {
if (dZ->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray<T> tmp(buffer, shape);
if (tmp.lengthOf() != dZ->at(e)->lengthOf()) {
nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf());
return ND4J_STATUS_BAD_OUTPUT;
}
tmp.assign(dZ->at(e));
}
} else {
// if op is inplace, our ResultSet holds pointers
dZ->purge();
}
delete dZ;
*/
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
}
int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
auto context = reinterpret_cast<Context*>(opContext);
return op->execute(context);
}
int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting the variable ID/index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void NativeOps::deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSet(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer NativeOps::getGraphState(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState(id);
}
void NativeOps::deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on the operation (i.e. While or If), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has an id of 0, and its inputs are mapped below
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point the Graph and Node for the current op have been fully processed
return Status::OK();
}
Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
void NativeOps::deleteResultWrapper(Nd4jPointer ptr) {
// nothing special here, just releasing the wrapper
auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr);
delete p;
}
int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ);
*/
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//nd4j::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
}
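// Example use of the dispatcher above (illustrative): converting N float32 values on the
// device into float16 goes through the ND4J_FLOAT32 -> ND4J_FLOAT16 branch, i.e.
//   convertTypes(extras, ND4J_FLOAT32, dX, N, ND4J_FLOAT16, dZ);
// which ends up calling nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz).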
Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new nd4j::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<nd4j::utf8string*>(ptr));
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs,
void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const int* indexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
for (int e = 0; e < numOfSubArrs; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
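// opCode mapping used by the kernel above (taken from its switch): 0 add, 1 subtract,
// 2 multiply, 3 divide, 4 reverse subtract (x = y - x), 5 reverse divide (x = y / x),
// 6 assign (x = y). Sub-arrays whose index maps to another block are skipped, so each
// indexed slice is updated by exactly one block.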
template<typename T>
__host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) {
scatterUpdateCuda<T><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
void NativeOps::scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs,
void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets,
void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets,
void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets,
void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets,
int* hIindexes, int* dIndexes) {
auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
nd4j::DataType type = ArrayOptions::dataType(hXShapeInfo);
BUILD_SINGLE_SELECTOR(type, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIndexes), LIBND4J_TYPES);
nd4j::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
}
void NativeOps::inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) {
auto p = reinterpret_cast<nd4j::DebugInfo*>(debugInfo);
NDArray array(buffer, shapeInfo, nullptr);
nd4j::DebugHelper::retrieveDebugStatistics(p, &array);
}
void __global__ tryPointerKernel(void* p, int len) {
auto buf = reinterpret_cast<int8_t*>(p);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int b;
if (tid < len)
atomicAdd(&b, buf[tid]);
__syncthreads();
if (threadIdx.x ==0 && blockIdx.x == 0)
printf("Pointer check complete: %i\n", b);
}
void NativeOps::tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) {
cudaStream_t stream;
cudaStreamCreate(&stream);
tryPointerKernel<<<256, 512, len+64, stream>>>(p, len);
auto e = cudaStreamSynchronize(stream);
if (e != 0)
throw std::runtime_error("tryPointer failed");
cudaStreamDestroy(stream);
}
|
68c9360e33e13ae95427b905f0a915c06727725f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <iomanip>
#include <iostream>
#include <omp.h>
#include <vector>
#define C 4
#define THREADS 1024 // 2^10
// #define MAX 85
#define MAX 110 // for multi-gpu only
#define MAX_S MAX* MAX
#define PERM_MAX (MAX * (MAX - 1) * (MAX - 2) * (MAX - 3)) / 24
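// PERM_MAX is the number of 4-vertex seeds, i.e. the binomial coefficient C(MAX, 4).
// Worked example: for MAX = 110 it is 110 * 109 * 108 * 107 / 24 = 5,773,185
// (and for MAX = 85 it would be 2,024,785).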
#define pb push_back
#define mp make_pair
#define gpuErrChk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(hipError_t code, char* file, int line, bool abort = true)
{
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
getchar();
}
}
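// Typical usage of the error-check macro above (illustrative, hostBuf is a placeholder):
//   int* devBuf = nullptr;
//   gpuErrChk(hipMalloc((void**)&devBuf, MAX_S * sizeof(int)));
//   gpuErrChk(hipMemcpy(devBuf, hostBuf, MAX_S * sizeof(int), hipMemcpyHostToDevice));
// Any call returning something other than hipSuccess prints the failing file/line and
// waits on getchar() before (optionally) continuing.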
using namespace std;
typedef long long int64;
typedef pair<int, int> ii;
/*
sz ---> Adjacency matrix dimension (1D)
perm ---> Number of permutations of an instance
graph ---> Adjacency matrix itself
seeds ---> Set of seeds
faces ---> Set of triangular faces for the output
*/
struct Node {
int sz, perm;
int graph[MAX_S], seeds[C * PERM_MAX], F_ANS[6 * MAX];
};
/*
faces ---> Number of triangular faces
count ---> Number of remaining vertices
tmpMax ---> Max value obtained for a seed
F ---> Set of triangular faces
V ---> Set of remaining vertices
*/
struct Params {
int *faces, *count, *tmpMax;
int *F, *V;
};
/*
SIZE ---> Number of vertices
BLOCKS ---> Number of blocks
PERM ---> Number of permutations
R ---> Output graph for a possible solution
F ---> Set of triangular faces of an instance
qtd ---> Number of possible 4-cliques
*/
int SIZE, PERM, GPU_CNT = 1;
int R[MAX_S], F[8 * MAX], bib[MAX];
int qtd = 0;
Node* N;
/*
Generates a list containing the vertices which are not on the planar graph
*/
__device__ void generateList(Node* devN, Params* devP, int t, int offset)
{
int sz = devN->sz, perm = devN->perm;
int va = devN->seeds[(t + offset) * 4],
vb = devN->seeds[(t + offset) * 4 + 1],
vc = devN->seeds[(t + offset) * 4 + 2],
vd = devN->seeds[(t + offset) * 4 + 3];
for (int i = 0; i < sz; i++) {
if (i == va || i == vb || i == vc || i == vd)
devP->V[t + i * perm] = -1;
else
devP->V[t + i * perm] = i;
}
}
/*
Returns the weight of the planar graph so far
*/
__device__ void generateTriangularFaceList(Node* devN, Params* devP, int graph[], int t, int offset)
{
int sz = devN->sz, perm = devN->perm;
int va = devN->seeds[(t + offset) * 4],
vb = devN->seeds[(t + offset) * 4 + 1],
vc = devN->seeds[(t + offset) * 4 + 2],
vd = devN->seeds[(t + offset) * 4 + 3];
//generate first triangle of the output graph
devP->F[t + (devP->faces[t] * 3) * perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
//generate the next 3 possible faces
devP->F[t + (devP->faces[t] * 3) * perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd;
devP->F[t + (devP->faces[t] * 3) * perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd;
devP->F[t + (devP->faces[t] * 3) * perm] = vb;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd;
int resp = graph[va * sz + vb] + graph[va * sz + vc] + graph[vb * sz + vc];
resp += graph[va * sz + vd] + graph[vb * sz + vd] + graph[vc * sz + vd];
devP->tmpMax[t] = resp;
}
/*
Insert a new vertex, 3 new triangular faces and removes face 'f' from the set
*/
__device__ int operationT2(Node* devN, Params* devP, int graph[], int new_vertex, int f, int t)
{
int sz = devN->sz, perm = devN->perm;
//remove the chosen face and insert a new one
int va = devP->F[t + (f * 3) * perm],
vb = devP->F[t + (f * 3 + 1) * perm],
vc = devP->F[t + (f * 3 + 2) * perm];
devP->F[t + (f * 3) * perm] = new_vertex,
devP->F[t + (f * 3 + 1) * perm] = va,
devP->F[t + (f * 3 + 2) * perm] = vb;
//and insert the other two possible faces
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = va;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
int resp = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
return resp;
}
/*
Return the vertex with the maximum gain inserting within a face 'f'
*/
__device__ int maxGain(Node* devN, Params* devP, int graph[], int* f, int t)
{
int sz = devN->sz, perm = devN->perm;
int gain = -1, vertex = -1;
//iterate through the remaining vertices
for (int new_vertex = 0; new_vertex < sz; new_vertex++) {
if (devP->V[t + new_vertex * perm] == -1)
continue;
        //and test which has the maximum gain with its insertion
//within all possible faces
int faces = devP->faces[t];
for (int i = 0; i < faces; i++) {
int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm];
int tmpGain = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
if (tmpGain > gain) {
gain = tmpGain;
*f = i;
vertex = new_vertex;
}
}
}
return vertex;
}
__device__ void tmfg(Node* devN, Params* devP, int graph[], int t)
{
int perm = devN->perm;
while (devP->count[t]) {
int f = -1;
int vertex = maxGain(devN, devP, graph, &f, t);
devP->V[t + vertex * perm] = -1;
devP->tmpMax[t] += operationT2(devN, devP, graph, vertex, f, t);
devP->count[t]--;
}
}
__device__ void copyGraph(Node* devN, Params* devP, int t)
{
int faces = devP->faces[t], perm = devN->perm;
for (int i = 0; i < faces; i++) {
int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm];
devN->F_ANS[i * 3] = va, devN->F_ANS[i * 3 + 1] = vb, devN->F_ANS[i * 3 + 2] = vc;
}
}
__device__ void initializeDevice(Params* devP, int sz, int t)
{
devP->faces[t] = 0;
devP->tmpMax[t] = -1;
devP->count[t] = sz - 4;
}
__global__ void tmfgParallel(Node* devN, Params devP, int* respMax, int offset)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int sz = devN->sz, perm = devN->perm;
extern __shared__ int graph[];
for (int i = threadIdx.x; i < sz * sz; i += blockDim.x)
graph[i] = devN->graph[i];
__syncthreads();
if (x < perm) {
initializeDevice(&devP, sz, x);
generateList(devN, &devP, x, offset);
generateTriangularFaceList(devN, &devP, graph, x, offset);
tmfg(devN, &devP, graph, x);
atomicMax(respMax, devP.tmpMax[x]);
__syncthreads();
if (devP.tmpMax[x] == *respMax) {
copyGraph(devN, &devP, x);
}
__syncthreads();
}
}
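/*
    Note: tmfgPrepare launches this kernel with SIZE * SIZE * sizeof(int)
    bytes of dynamic shared memory for the per-block 'graph' copy; with
    MAX = 110 that is at most 110 * 110 * 4 = 48,400 bytes, which only just
    fits the usual 48 KB per-block limit. Every thread whose tmpMax equals
    the final *respMax calls copyGraph, so F_ANS holds the faces written by
    whichever of those (equally good) threads wrote last.
*/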
int tmfgPrepare()
{
int finalResp = -1, pos = -1;
#pragma omp parallel for num_threads(GPU_CNT)
for (int gpu_id = 0; gpu_id < GPU_CNT; gpu_id++) {
hipSetDevice(gpu_id);
int range = (int)ceil(PERM / (double)GPU_CNT);
int perm = ((gpu_id + 1) * range > PERM ? PERM - gpu_id * range : range);
int offset = gpu_id * range;
N->perm = perm;
int resp = -1, *tmpResp;
gpuErrChk(hipMalloc((void**)&tmpResp, sizeof(int)));
gpuErrChk(hipMemcpy(tmpResp, &resp, sizeof(int), hipMemcpyHostToDevice));
Node* devN;
Params devP;
size_t sz = range * sizeof(int) * 3 + range * sizeof(int) * SIZE + range * sizeof(int) * (6 * SIZE);
printf("Using %d mbytes in Kernel %d\n", (sz + sizeof(Node)) / (1 << 20), gpu_id);
fprintf(stderr, "Using %d mbytes in Kernel %d\n", (sz + sizeof(Node)) / (1 << 20), gpu_id);
gpuErrChk(hipMalloc((void**)&devN, sizeof(Node)));
gpuErrChk(hipMemcpy(devN, N, sizeof(Node), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc((void**)&devP.faces, perm * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.count, perm * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.tmpMax, perm * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.F, 6 * SIZE * perm * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.V, SIZE * perm * sizeof(int)));
dim3 blocks(perm / THREADS + 1, 1);
dim3 threads(THREADS, 1);
printf("Kernel %d launched with %d blocks, each w/ %d threads\n", gpu_id, range / THREADS + 1, THREADS);
fprintf(stderr, "Kernel %d launched with %d blocks, each w/ %d threads\n", gpu_id, range / THREADS + 1, THREADS);
hipLaunchKernelGGL(( tmfgParallel), dim3(blocks), dim3(threads), SIZE * SIZE * sizeof(int), 0, devN, devP, tmpResp, offset);
gpuErrChk(hipDeviceSynchronize());
//copy back the maximum weight and the set of faces
//which gave this result
gpuErrChk(hipMemcpy(&resp, tmpResp, sizeof(int), hipMemcpyDeviceToHost));
printf("Kernel finished.\nLocal maximum found in Kernel %d: %d\n", gpu_id, resp);
printf("Copying results...\n");
#pragma omp critical
{
if (resp > finalResp) {
finalResp = resp;
pos = gpu_id;
}
}
// #pragma omp barrier
if (pos == gpu_id) {
gpuErrChk(hipMemcpy(&F, devN->F_ANS, 6 * MAX * sizeof(int), hipMemcpyDeviceToHost));
}
printf("Freeing memory...\n");
gpuErrChk(hipFree(devN));
gpuErrChk(hipFree(devP.faces));
gpuErrChk(hipFree(devP.count));
gpuErrChk(hipFree(devP.tmpMax));
gpuErrChk(hipFree(devP.F));
gpuErrChk(hipFree(devP.V));
gpuErrChk(hipDeviceReset());
}
return finalResp;
}
void printElapsedTime(double start, double stop)
{
double elapsed = stop - start;
printf("Elapsed time: %.3lfs.\n", elapsed);
}
double getTime()
{
timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return double(ts.tv_sec) + double(ts.tv_nsec) / 1e9;
}
/*
C ---> Size of the combination
index ---> Current index in data[]
data[] ---> Temporary array to store a current combination
i ---> Index of current element in vertices[]
*/
void combineUntil(int index, vector<int>& data, int i)
{
    // Current combination is ready; store it as a seed
if (index == C) {
for (int j = 0; j < C; j++) {
N->seeds[qtd * C + j] = data[j];
}
qtd++;
return;
}
// When there are no more elements to put in data[]
if (i >= SIZE)
return;
//current is inserted; put next at a next location
data[index] = i;
combineUntil(index + 1, data, i + 1);
//current is deleted; replace it with next
combineUntil(index, data, i + 1);
}
void combine()
{
vector<int> data(C);
//print all combinations of size 'r' using a temporary array 'data'
combineUntil(0, data, 0);
}
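/*
    Example: for SIZE = 5 this enumerates the C(5, 4) = 5 seeds in
    lexicographic order: {0,1,2,3}, {0,1,2,4}, {0,1,3,4}, {0,2,3,4}, {1,2,3,4}.
*/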
void initialize()
{
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
R[i * SIZE + j] = R[j * SIZE + i] = -1;
}
}
}
void readInput()
{
int x;
cin >> SIZE;
PERM = bib[SIZE - 1];
N = (Node*)malloc(sizeof(Node));
N->sz = SIZE;
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
cin >> x;
N->graph[i * SIZE + j] = x;
N->graph[j * SIZE + i] = x;
}
}
}
/*
    Precompute bib[i - 1] = C(i, 4) = i*(i-1)*(i-2)*(i-3)/24, the number of
    4-clique seeds (permutations) for a graph with i vertices
*/
void sizeDefinitions()
{
for (int i = 6; i <= MAX; i++) {
int resp = 1;
for (int j = i - 3; j <= i; j++)
resp *= j;
resp /= 24;
bib[i - 1] = resp;
}
}
int main(int argv, char** argc)
{
ios::sync_with_stdio(false);
sizeDefinitions();
//read the input, which is given by a size of a graph and its weighted edges.
//the given graph is dense.
readInput();
initialize();
//given the number of vertices, generate multiple 4-clique seeds
combine();
if (argv == 2) {
hipSetDevice(atoi(argc[1]));
} else if (argv == 3) {
GPU_CNT = atoi(argc[2]);
int d;
hipGetDeviceCount(&d);
if (GPU_CNT > d)
GPU_CNT = d;
}
//cout << gpuCount << endl;
//hipSetDevice(dv);
double start = getTime();
int respMax = tmfgPrepare();
double stop = getTime();
//reconstruct the graph given the regions of the graph
for (int i = 0; i < 2 * SIZE; i++) {
int va = F[i * 3], vb = F[i * 3 + 1], vc = F[i * 3 + 2];
if (va == vb && vb == vc)
continue;
R[va * SIZE + vb] = R[vb * SIZE + va] = N->graph[va * SIZE + vb];
R[va * SIZE + vc] = R[vc * SIZE + va] = N->graph[va * SIZE + vc];
R[vb * SIZE + vc] = R[vc * SIZE + vb] = N->graph[vb * SIZE + vc];
}
cout << "Printing generated graph: " << endl;
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
cout << R[i * SIZE + j] << " ";
}
cout << endl;
}
printElapsedTime(start, stop);
cout << "Maximum weight found: " << respMax << endl;
free(N);
return 0;
}
|
68c9360e33e13ae95427b905f0a915c06727725f.cu
|
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cuda_runtime.h>
#include <iomanip>
#include <iostream>
#include <omp.h>
#include <vector>
#define C 4
#define THREADS 1024 // 2^10
// #define MAX 85
#define MAX 110 // for multi-gpu only
#define MAX_S MAX* MAX
#define PERM_MAX (MAX * (MAX - 1) * (MAX - 2) * (MAX - 3)) / 24
#define pb push_back
#define mp make_pair
#define gpuErrChk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
getchar();
}
}
using namespace std;
typedef long long int64;
typedef pair<int, int> ii;
/*
sz ---> Adjacency matrix dimension (1D)
perm ---> Number of permutations of an instance
graph ---> Adjacency matrix itself
seeds ---> Set of seeds
faces ---> Set of triangular faces for the output
*/
struct Node {
int sz, perm;
int graph[MAX_S], seeds[C * PERM_MAX], F_ANS[6 * MAX];
};
/*
    faces ---> Number of triangular faces
    count ---> Number of remaining vertices
    tmpMax ---> Max value obtained for a seed
    F ---> Set of triangular faces
    V ---> Set of remaining vertices
*/
struct Params {
int *faces, *count, *tmpMax;
int *F, *V;
};
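/*
    Layout note (inferred from the kernels below): per-thread state is
    interleaved across all 'perm' threads, so vertex k of face f for
    thread t is stored at F[t + (f * 3 + k) * perm], and the remaining
    vertex list uses V[t + i * perm].
    Example: with perm = 4 and t = 1, face 0 occupies F[1], F[5], F[9].
*/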
/*
SIZE ---> Number of vertices
BLOCKS ---> Number of blocks
PERM ---> Number of permutations
R ---> Output graph for a possible solution
F ---> Set of triangular faces of an instance
qtd ---> Number of possible 4-cliques
*/
int SIZE, PERM, GPU_CNT = 1;
int R[MAX_S], F[8 * MAX], bib[MAX];
int qtd = 0;
Node* N;
/*
Generates a list containing the vertices which are not on the planar graph
*/
__device__ void generateList(Node* devN, Params* devP, int t, int offset)
{
int sz = devN->sz, perm = devN->perm;
int va = devN->seeds[(t + offset) * 4],
vb = devN->seeds[(t + offset) * 4 + 1],
vc = devN->seeds[(t + offset) * 4 + 2],
vd = devN->seeds[(t + offset) * 4 + 3];
for (int i = 0; i < sz; i++) {
if (i == va || i == vb || i == vc || i == vd)
devP->V[t + i * perm] = -1;
else
devP->V[t + i * perm] = i;
}
}
/*
    Builds the four initial triangular faces of the seed's tetrahedron and
    stores the seed's total edge weight in tmpMax
*/
__device__ void generateTriangularFaceList(Node* devN, Params* devP, int graph[], int t, int offset)
{
int sz = devN->sz, perm = devN->perm;
int va = devN->seeds[(t + offset) * 4],
vb = devN->seeds[(t + offset) * 4 + 1],
vc = devN->seeds[(t + offset) * 4 + 2],
vd = devN->seeds[(t + offset) * 4 + 3];
//generate first triangle of the output graph
devP->F[t + (devP->faces[t] * 3) * perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
//generate the next 3 possible faces
devP->F[t + (devP->faces[t] * 3) * perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd;
devP->F[t + (devP->faces[t] * 3) * perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd;
devP->F[t + (devP->faces[t] * 3) * perm] = vb;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd;
int resp = graph[va * sz + vb] + graph[va * sz + vc] + graph[vb * sz + vc];
resp += graph[va * sz + vd] + graph[vb * sz + vd] + graph[vc * sz + vd];
devP->tmpMax[t] = resp;
}
/*
Insert a new vertex, 3 new triangular faces and removes face 'f' from the set
*/
__device__ int operationT2(Node* devN, Params* devP, int graph[], int new_vertex, int f, int t)
{
int sz = devN->sz, perm = devN->perm;
//remove the chosen face and insert a new one
int va = devP->F[t + (f * 3) * perm],
vb = devP->F[t + (f * 3 + 1) * perm],
vc = devP->F[t + (f * 3 + 2) * perm];
devP->F[t + (f * 3) * perm] = new_vertex,
devP->F[t + (f * 3 + 1) * perm] = va,
devP->F[t + (f * 3 + 2) * perm] = vb;
//and insert the other two possible faces
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = va;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
int resp = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
return resp;
}
/*
Return the vertex with the maximum gain inserting within a face 'f'
*/
__device__ int maxGain(Node* devN, Params* devP, int graph[], int* f, int t)
{
int sz = devN->sz, perm = devN->perm;
int gain = -1, vertex = -1;
//iterate through the remaining vertices
for (int new_vertex = 0; new_vertex < sz; new_vertex++) {
if (devP->V[t + new_vertex * perm] == -1)
continue;
        //and test which has the maximum gain with its insertion
//within all possible faces
int faces = devP->faces[t];
for (int i = 0; i < faces; i++) {
int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm];
int tmpGain = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
if (tmpGain > gain) {
gain = tmpGain;
*f = i;
vertex = new_vertex;
}
}
}
return vertex;
}
__device__ void tmfg(Node* devN, Params* devP, int graph[], int t)
{
int perm = devN->perm;
while (devP->count[t]) {
int f = -1;
int vertex = maxGain(devN, devP, graph, &f, t);
devP->V[t + vertex * perm] = -1;
devP->tmpMax[t] += operationT2(devN, devP, graph, vertex, f, t);
devP->count[t]--;
}
}
__device__ void copyGraph(Node* devN, Params* devP, int t)
{
int faces = devP->faces[t], perm = devN->perm;
for (int i = 0; i < faces; i++) {
int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm];
devN->F_ANS[i * 3] = va, devN->F_ANS[i * 3 + 1] = vb, devN->F_ANS[i * 3 + 2] = vc;
}
}
__device__ void initializeDevice(Params* devP, int sz, int t)
{
devP->faces[t] = 0;
devP->tmpMax[t] = -1;
devP->count[t] = sz - 4;
}
__global__ void tmfgParallel(Node* devN, Params devP, int* respMax, int offset)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int sz = devN->sz, perm = devN->perm;
extern __shared__ int graph[];
for (int i = threadIdx.x; i < sz * sz; i += blockDim.x)
graph[i] = devN->graph[i];
__syncthreads();
if (x < perm) {
initializeDevice(&devP, sz, x);
generateList(devN, &devP, x, offset);
generateTriangularFaceList(devN, &devP, graph, x, offset);
tmfg(devN, &devP, graph, x);
atomicMax(respMax, devP.tmpMax[x]);
__syncthreads();
if (devP.tmpMax[x] == *respMax) {
copyGraph(devN, &devP, x);
}
__syncthreads();
}
}
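/*
    Note: tmfgPrepare launches this kernel with SIZE * SIZE * sizeof(int)
    bytes of dynamic shared memory for the per-block 'graph' copy; with
    MAX = 110 that is at most 110 * 110 * 4 = 48,400 bytes, which only just
    fits the usual 48 KB per-block limit. Every thread whose tmpMax equals
    the final *respMax calls copyGraph, so F_ANS holds the faces written by
    whichever of those (equally good) threads wrote last.
*/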
int tmfgPrepare()
{
int finalResp = -1, pos = -1;
#pragma omp parallel for num_threads(GPU_CNT)
for (int gpu_id = 0; gpu_id < GPU_CNT; gpu_id++) {
cudaSetDevice(gpu_id);
int range = (int)ceil(PERM / (double)GPU_CNT);
int perm = ((gpu_id + 1) * range > PERM ? PERM - gpu_id * range : range);
int offset = gpu_id * range;
N->perm = perm;
int resp = -1, *tmpResp;
gpuErrChk(cudaMalloc((void**)&tmpResp, sizeof(int)));
gpuErrChk(cudaMemcpy(tmpResp, &resp, sizeof(int), cudaMemcpyHostToDevice));
Node* devN;
Params devP;
size_t sz = range * sizeof(int) * 3 + range * sizeof(int) * SIZE + range * sizeof(int) * (6 * SIZE);
printf("Using %d mbytes in Kernel %d\n", (sz + sizeof(Node)) / (1 << 20), gpu_id);
fprintf(stderr, "Using %d mbytes in Kernel %d\n", (sz + sizeof(Node)) / (1 << 20), gpu_id);
gpuErrChk(cudaMalloc((void**)&devN, sizeof(Node)));
gpuErrChk(cudaMemcpy(devN, N, sizeof(Node), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc((void**)&devP.faces, perm * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.count, perm * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.tmpMax, perm * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.F, 6 * SIZE * perm * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.V, SIZE * perm * sizeof(int)));
dim3 blocks(perm / THREADS + 1, 1);
dim3 threads(THREADS, 1);
printf("Kernel %d launched with %d blocks, each w/ %d threads\n", gpu_id, range / THREADS + 1, THREADS);
fprintf(stderr, "Kernel %d launched with %d blocks, each w/ %d threads\n", gpu_id, range / THREADS + 1, THREADS);
tmfgParallel<<<blocks, threads, SIZE * SIZE * sizeof(int)>>>(devN, devP, tmpResp, offset);
gpuErrChk(cudaDeviceSynchronize());
//copy back the maximum weight and the set of faces
//which gave this result
gpuErrChk(cudaMemcpy(&resp, tmpResp, sizeof(int), cudaMemcpyDeviceToHost));
printf("Kernel finished.\nLocal maximum found in Kernel %d: %d\n", gpu_id, resp);
printf("Copying results...\n");
#pragma omp critical
{
if (resp > finalResp) {
finalResp = resp;
pos = gpu_id;
}
}
// #pragma omp barrier
if (pos == gpu_id) {
gpuErrChk(cudaMemcpy(&F, devN->F_ANS, 6 * MAX * sizeof(int), cudaMemcpyDeviceToHost));
}
printf("Freeing memory...\n");
gpuErrChk(cudaFree(devN));
gpuErrChk(cudaFree(devP.faces));
gpuErrChk(cudaFree(devP.count));
gpuErrChk(cudaFree(devP.tmpMax));
gpuErrChk(cudaFree(devP.F));
gpuErrChk(cudaFree(devP.V));
gpuErrChk(cudaDeviceReset());
}
return finalResp;
}
void printElapsedTime(double start, double stop)
{
double elapsed = stop - start;
printf("Elapsed time: %.3lfs.\n", elapsed);
}
double getTime()
{
timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return double(ts.tv_sec) + double(ts.tv_nsec) / 1e9;
}
/*
C ---> Size of the combination
index ---> Current index in data[]
data[] ---> Temporary array to store a current combination
i ---> Index of current element in vertices[]
*/
void combineUntil(int index, vector<int>& data, int i)
{
    // Current combination is ready; store it as a seed
if (index == C) {
for (int j = 0; j < C; j++) {
N->seeds[qtd * C + j] = data[j];
}
qtd++;
return;
}
// When there are no more elements to put in data[]
if (i >= SIZE)
return;
//current is inserted; put next at a next location
data[index] = i;
combineUntil(index + 1, data, i + 1);
//current is deleted; replace it with next
combineUntil(index, data, i + 1);
}
void combine()
{
vector<int> data(C);
//print all combinations of size 'r' using a temporary array 'data'
combineUntil(0, data, 0);
}
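/*
    Example: for SIZE = 5 this enumerates the C(5, 4) = 5 seeds in
    lexicographic order: {0,1,2,3}, {0,1,2,4}, {0,1,3,4}, {0,2,3,4}, {1,2,3,4}.
*/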
void initialize()
{
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
R[i * SIZE + j] = R[j * SIZE + i] = -1;
}
}
}
void readInput()
{
int x;
cin >> SIZE;
PERM = bib[SIZE - 1];
N = (Node*)malloc(sizeof(Node));
N->sz = SIZE;
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
cin >> x;
N->graph[i * SIZE + j] = x;
N->graph[j * SIZE + i] = x;
}
}
}
/*
    Precompute bib[i - 1] = C(i, 4) = i*(i-1)*(i-2)*(i-3)/24, the number of
    4-clique seeds (permutations) for a graph with i vertices
*/
void sizeDefinitions()
{
for (int i = 6; i <= MAX; i++) {
int resp = 1;
for (int j = i - 3; j <= i; j++)
resp *= j;
resp /= 24;
bib[i - 1] = resp;
}
}
int main(int argv, char** argc)
{
ios::sync_with_stdio(false);
sizeDefinitions();
//read the input, which is given by a size of a graph and its weighted edges.
//the given graph is dense.
readInput();
initialize();
//given the number of vertices, generate multiple 4-clique seeds
combine();
if (argv == 2) {
cudaSetDevice(atoi(argc[1]));
} else if (argv == 3) {
GPU_CNT = atoi(argc[2]);
int d;
cudaGetDeviceCount(&d);
if (GPU_CNT > d)
GPU_CNT = d;
}
//cout << gpuCount << endl;
//cudaSetDevice(dv);
double start = getTime();
int respMax = tmfgPrepare();
double stop = getTime();
//reconstruct the graph given the regions of the graph
for (int i = 0; i < 2 * SIZE; i++) {
int va = F[i * 3], vb = F[i * 3 + 1], vc = F[i * 3 + 2];
if (va == vb && vb == vc)
continue;
R[va * SIZE + vb] = R[vb * SIZE + va] = N->graph[va * SIZE + vb];
R[va * SIZE + vc] = R[vc * SIZE + va] = N->graph[va * SIZE + vc];
R[vb * SIZE + vc] = R[vc * SIZE + vb] = N->graph[vb * SIZE + vc];
}
cout << "Printing generated graph: " << endl;
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
cout << R[i * SIZE + j] << " ";
}
cout << endl;
}
printElapsedTime(start, stop);
cout << "Maximum weight found: " << respMax << endl;
free(N);
return 0;
}
|
e1d47c7e68bcefcd141f5f7513c71313c231f6f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 20000
#define GRID_D1 20
#define GRID_D2 2
#define BLOCK_D1 512
#define BLOCK_D2 1
#define BLOCK_D3 1
// this is the kernel function called for each thread
// we use the CUDA variables {threadIdx, blockIdx, blockDim, gridDim} to determine a unique ID for each thread
__global__ void hello(void)
{
// id of the block
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
// size of each block (within grid of blocks)
int blocksize = blockDim.x * blockDim.y * blockDim.z;
// id of thread in a given block
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// assign overall id/index of the thread
int idx = myblock * blocksize + subthread;
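    // Worked example for this launch (gridDim = (20,2), blockDim = (512,1,1)):
    // block (3,1), thread (5,0,0) gives myblock = 3 + 1*20 = 23, blocksize = 512,
    // subthread = 5, so idx = 23*512 + 5 = 11781.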
if(idx < 2000 || idx > 19000) {
        // the print buffer within the kernel is limited, so only print for the first and last chunks of threads
if (idx < N){
printf("Hello World! My block index is (%d, %d) [Grid dims=(%d,%d)], 3D-thread index within blocks=(%d,%d,%d) => \
thread index=%d\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx);
} else {
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => \
thread index=%d [### this thread would not be used for N=%d ###]\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y,
threadIdx.x, threadIdx.y, threadIdx.z, idx, N);
}
}
}
int main(int argc, char **argv)
{
// objects containing the block and grid info
const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);
const dim3 gridSize(GRID_D1, GRID_D2, 1);
int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*GRID_D1*GRID_D2;
if (nthreads < N){
printf("\n================ NOT ENOUGH THREADS TO COVER N=%d ===================\n\n",N);
} else {
printf("Launching %d threads (N=%d)\n", nthreads, N);
}
// launch the kernel on the specified grid of thread blocks
hipLaunchKernelGGL(( hello), dim3(gridSize), dim3(blockSize), 0, 0, );
// Need to flush prints, otherwise none of the prints from within the kernel will show up
// as program exit does not flush the print buffer.
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr) {
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
} else {
printf("kernel launch success!\n");
}
printf("That's all!\n");
return 0;
}
|
e1d47c7e68bcefcd141f5f7513c71313c231f6f1.cu
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 20000
#define GRID_D1 20
#define GRID_D2 2
#define BLOCK_D1 512
#define BLOCK_D2 1
#define BLOCK_D3 1
// this is the kernel function called for each thread
// we use the CUDA variables {threadIdx, blockIdx, blockDim, gridDim} to determine a unique ID for each thread
__global__ void hello(void)
{
// id of the block
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
// size of each block (within grid of blocks)
int blocksize = blockDim.x * blockDim.y * blockDim.z;
// id of thread in a given block
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// assign overall id/index of the thread
int idx = myblock * blocksize + subthread;
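    // Worked example for this launch (gridDim = (20,2), blockDim = (512,1,1)):
    // block (3,1), thread (5,0,0) gives myblock = 3 + 1*20 = 23, blocksize = 512,
    // subthread = 5, so idx = 23*512 + 5 = 11781.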
if(idx < 2000 || idx > 19000) {
        // the print buffer within the kernel is limited, so only print for the first and last chunks of threads
if (idx < N){
printf("Hello World! My block index is (%d, %d) [Grid dims=(%d,%d)], 3D-thread index within blocks=(%d,%d,%d) => \
thread index=%d\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx);
} else {
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => \
thread index=%d [### this thread would not be used for N=%d ###]\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y,
threadIdx.x, threadIdx.y, threadIdx.z, idx, N);
}
}
}
int main(int argc, char **argv)
{
// objects containing the block and grid info
const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);
const dim3 gridSize(GRID_D1, GRID_D2, 1);
int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*GRID_D1*GRID_D2;
if (nthreads < N){
printf("\n================ NOT ENOUGH THREADS TO COVER N=%d ===================\n\n",N);
} else {
printf("Launching %d threads (N=%d)\n", nthreads, N);
}
// launch the kernel on the specified grid of thread blocks
hello<<<gridSize, blockSize>>>();
// Need to flush prints, otherwise none of the prints from within the kernel will show up
// as program exit does not flush the print buffer.
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr) {
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
} else {
printf("kernel launch success!\n");
}
printf("That's all!\n");
return 0;
}
|
b5281e241ef15224b8a9773cee1132a0c7923cb7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "blocks.cuh"
#include "encoder.cuh"
/* #define B 4
#define S 128
#define H 8
#define P 32
#define N (H * P)
#define U (4 * N) */
struct dimB { enum { value = sizeB }; };
struct dimK { enum { value = sizeS }; };
struct dimJ { enum { value = sizeS }; };
struct dimH { enum { value = sizeH }; };
struct dimP { enum { value = sizeP }; };
struct dimN { enum { value = sizeI }; };
struct dimU { enum { value = sizeU }; };
struct dimQKV { enum { value = 3 }; };
using lX = metal::list<dimB, dimJ, dimN>;
using lK = metal::list<dimB, dimK, dimN>;
using lQ = metal::list<dimB, dimJ, dimN>;
using lV = metal::list<dimB, dimK, dimN>;
using lWKQV = metal::list<dimQKV, dimP, dimH, dimN>;
using lWK = metal::list<dimP, dimH, dimN>;
using lWQ = metal::list<dimP, dimH, dimN>;
using lWV = metal::list<dimP, dimH, dimN>;
using lKKQQVV = metal::list<dimQKV, dimP, dimH, dimB, dimJ>;
using lKK = metal::list<dimP, dimH, dimB, dimK>;
using lQQ = metal::list<dimP, dimH, dimB, dimJ>;
using lVV = metal::list<dimP, dimH, dimB, dimK>;
using lBETA = metal::list<dimH, dimB, dimJ, dimK>;
using lALPHA = metal::list<dimH, dimB, dimJ, dimK>;
using lGAMMA = metal::list<dimP, dimH, dimB, dimJ>;
using lWO = metal::list<dimP, dimH, dimN>;
using lATT = metal::list<dimB, dimJ, dimN>;
using lDROP1MASK = metal::list<dimB, dimJ, dimN>;
using lSB1 = metal::list<dimB, dimJ, dimN>;
using lS1 = metal::list<dimN>;
using lB1 = metal::list<dimN>;
using lLINB1 = metal::list<dimU>;
using lLINW1 = metal::list<dimU, dimN>;
using lSB1_LINW1 = metal::list<dimB, dimJ, dimU>;
using lDROP2 = metal::list<dimB, dimJ, dimU>;
using lLIN1 = metal::list<dimB, dimJ, dimU>;
using lLINB2 = metal::list<dimN>;
using lLINW2 = metal::list<dimN, dimU>;
using lDROP2_LINW2 = metal::list<dimB, dimJ, dimN>;
using lLIN2 = metal::list<dimB, dimJ, dimN>;
using lS2 = metal::list<dimN>;
using lDS2 = metal::list<dimN>;
using lB2 = metal::list<dimN>;
using lDB2 = metal::list<dimN>;
using lSB2 = metal::list<dimB, dimJ, dimN>;
using lDSB2 = metal::list<dimB, dimJ, dimN>;
using lLN2 = metal::list<dimB, dimJ, dimN>;
using lDLN2 = metal::list<dimB, dimJ, dimN>;
using lLN2STD = metal::list<dimB, dimJ>;
using lLN2DIFF = metal::list<dimB, dimJ, dimN>;
using lDROP3MASK = metal::list<dimB, dimJ, dimN>;
using lDRESID2 = metal::list<dimB, dimJ, dimN>;
using lDLIN2 = metal::list<dimB, dimJ, dimN>;
using lDDROP2 = metal::list<dimB, dimJ, dimU>;
using lDLINW2 = metal::list<dimN, dimU>;
using lDROP2MASK = metal::list<dimB, dimJ, dimU>;
using lACT = metal::list<dimB, dimJ, dimU>;
using lDLIN1 = metal::list<dimB, dimJ, dimU>;
using lDLIN1_LINW1 = metal::list<dimB, dimJ, dimN>;
using lDLINW1 = metal::list<dimU, dimN>;
using lLN1 = metal::list<dimB, dimJ, dimN>;
using lDLN1 = metal::list<dimB, dimJ, dimN>;
using lDSB1 = metal::list<dimB, dimJ, dimN>;
using lDS1 = metal::list<dimN>;
using lDB1 = metal::list<dimN>;
using lDLINB2 = metal::list<dimN>;
using lDLINB1 = metal::list<dimU>;
using lLN1STD = metal::list<dimB, dimJ>;
using lLN1DIFF = metal::list<dimB, dimJ, dimN>;
using lDRESID1 = metal::list<dimB, dimJ, dimN>;
using lDATT = metal::list<dimB, dimJ, dimN>;
using lDXATT = metal::list<dimB, dimJ, dimN>;
using lDX = metal::list<dimB, dimJ, dimN>;
using lDGAMMA = metal::list<dimP, dimH, dimB, dimJ>;
using lDVV = metal::list<dimP, dimH, dimB, dimK>;
using lDWV = metal::list<dimP, dimH, dimN>;
using lDALPHA = metal::list<dimH, dimB, dimJ, dimK>;
using lDBETA = metal::list<dimH, dimB, dimJ, dimK>;
using lDQQ = metal::list<dimP, dimH, dimB, dimJ>;
using lDWQ = metal::list<dimP, dimH, dimN>;
using lDKK = metal::list<dimP, dimH, dimB, dimK>;
using lDWK = metal::list<dimP, dimH, dimN>;
using lDKKQQVV = metal::list<dimQKV, dimP, dimH, dimB, dimJ>;
using lDWO = metal::list<dimP, dimH, dimN>;
using lBKQV = metal::list<dimQKV, dimH, dimP>;
using lWKK = metal::list<dimP, dimH, dimB, dimK>;
using lWQQ = metal::list<dimP, dimH, dimB, dimJ>;
using lWVV = metal::list<dimP, dimH, dimB, dimK>;
using lWKKself = metal::list<dimP, dimH, dimB, dimJ>;
using lWQQself = metal::list<dimP, dimH, dimB, dimJ>;
using lWVVself = metal::list<dimP, dimH, dimB, dimJ>;
using lBK = metal::list<dimH, dimP>;
using lBQ = metal::list<dimH, dimP>;
using lBV = metal::list<dimH, dimP>;
using lDBK = metal::list<dimH, dimP>;
using lDBQ = metal::list<dimH, dimP>;
using lDBV = metal::list<dimH, dimP>;
using lDKKself = metal::list<dimP, dimH, dimB, dimJ>;
using lDQQself = metal::list<dimP, dimH, dimB, dimJ>;
using lDVVself = metal::list<dimP, dimH, dimB, dimJ>;
using lKKself = metal::list<dimP, dimH, dimB, dimJ>;
using lQQself = metal::list<dimP, dimH, dimB, dimJ>;
using lVVself = metal::list<dimP, dimH, dimB, dimJ>;
using lBO = metal::list<dimN>;
using lDBO = metal::list<dimN>;
using lATTN_DROP = metal::list<dimH, dimB, dimJ, dimK>;
using lATTN_DROP_MASK = metal::list<dimH, dimB, dimJ, dimK>;
using lDATTN_DROP = metal::list<dimH, dimB, dimJ, dimK>;
using layoutsList = metal::list<ENCODER_LAYOUTS_LIST>;
using Real = half;
using Enc = Encoder<Real, dimB, dimK, dimJ, dimH, dimP, dimN, dimU, dimQKV, layoutsList>;
extern "C" Enc* init() {
return new Enc();
}
extern "C" void encoder_forward(Enc* encoder, Real* X
// weights
, Real* WKQV
, Real* BKQV
, Real* WO
, Real* BO
, Real* S1
, Real* B1
, Real* LINB1
, Real* LINW1
, Real* S2
, Real* B2
, Real* LINB2
, Real* LINW2
// interm
, Real* KKQQVV
, Real* BETA
, Real* ALPHA
, Real* ATTN_DROP_MASK
, Real* ATTN_DROP
, Real* GAMMA
, Real* ATT
, Real* DROP1MASK
, Real* SB1
, Real* SB1_LINW1
, Real* DROP2
, Real* LIN1
, Real* DROP2_LINW2
, Real* LIN2
, Real* LN2
, Real* LN2STD
, Real* LN2DIFF
, Real* DROP2MASK
, Real* DROP3MASK
, Real* LN1
, Real* ACT
, Real* LN1STD
, Real* LN1DIFF
// out
, Real* Y)
{
encoder->encoder_forward(X
, WKQV
, BKQV
, WO
, BO
, S1
, B1
, LINB1
, LINW1
, S2
, B2
, LINB2
, LINW2
, KKQQVV
, BETA
, ALPHA
, ATTN_DROP_MASK
, ATTN_DROP
, GAMMA
, ATT
, DROP1MASK
, SB1
, SB1_LINW1
, DROP2
, LIN1
, DROP2_LINW2
, LIN2
, LN2
, LN2STD
, LN2DIFF
, DROP2MASK
, DROP3MASK
, LN1
, ACT
, LN1STD
, LN1DIFF
, Y);
}
extern "C" void encoder_backward(Enc* encoder
, Real* DY
// param_gradients
, Real* DWKQV
, Real* DBKQV
, Real* DWO
, Real* DBO
, Real* DS1
, Real* DB1
, Real* DLINB1
, Real* DLINW1
, Real* DS2
, Real* DB2
, Real* DLINB2
, Real* DLINW2
// backward intermediate
, Real* DLN2
, Real* DRESID2
, Real* DLIN2
, Real* DDROP2
, Real* DLIN1
, Real* DLIN1_LINW1
, Real* DLN1
, Real* DSB1
, Real* DRESID1
, Real* DATT
, Real* DXATT
, Real* DGAMMA
, Real* DATTN_DROP
, Real* DALPHA
, Real* DBETA
, Real* DKKQQVV
// weights
, Real* WKQV
, Real* BKQV
, Real* WO
, Real* BO
, Real* S1
, Real* B1
, Real* LINB1
, Real* LINW1
, Real* S2
, Real* B2
, Real* LINB2
, Real* LINW2
// forward intermediate
, Real* KKQQVV
, Real* BETA
, Real* ALPHA
, Real* ATTN_DROP_MASK
, Real* ATTN_DROP
, Real* GAMMA
, Real* ATT
, Real* DROP1MASK
, Real* SB1
, Real* SB1_LINW1
, Real* DROP2
, Real* LIN1
, Real* DROP2_LINW2
, Real* LIN2
, Real* LN2
, Real* LN2STD
, Real* LN2DIFF
, Real* DROP2MASK
, Real* DROP3MASK
, Real* LN1
, Real* ACT
, Real* LN1STD
, Real* LN1DIFF
//
, Real* X
//
, Real* DX
) {
encoder->encoder_backward(
DY
// param_gradients
, DWKQV
, DBKQV
, DWO
, DBO
, DS1
, DB1
, DLINB1
, DLINW1
, DS2
, DB2
, DLINB2
, DLINW2
// backward intermediate
, DLN2
, DRESID2
, DLIN2
, DDROP2
, DLIN1
, DLIN1_LINW1
, DLN1
, DSB1
, DRESID1
, DATT
, DXATT
, DGAMMA
, DATTN_DROP
, DALPHA
, DBETA
, DKKQQVV
// weights
, WKQV
, BKQV
, WO
, BO
, S1
, B1
, LINB1
, LINW1
, S2
, B2
, LINB2
, LINW2
// forward intermediate
, KKQQVV
, BETA
, ALPHA
, ATTN_DROP_MASK
, ATTN_DROP
, GAMMA
, ATT
, DROP1MASK
, SB1
, SB1_LINW1
, DROP2
, LIN1
, DROP2_LINW2
, LIN2
, LN2
, LN2STD
, LN2DIFF
, DROP2MASK
, DROP3MASK
, LN1
, ACT
, LN1STD
, LN1DIFF
//
, X
//
, DX
);
}
extern "C" void destroy(Enc* encoder) {
delete encoder;
}
// extern "C" void softmax_test(double* in, double* out) {
// struct d1 { enum { value = 1 }; };
// struct d2 { enum { value = 1 }; };
// struct dv { enum { value = 4 }; };
// struct dr { enum { value = 64 }; };
// using lIN = metal::list<d1, d2, dr, dv>;
// using lOUT = metal::list<d1, d2, dr, dv>;
// Softmax<double, d1, d2, dv, dr, lIN, lOUT>::run(in, out, 0);
// hipStreamSynchronize(0);
// }
|
b5281e241ef15224b8a9773cee1132a0c7923cb7.cu
|
#include "blocks.cuh"
#include "encoder.cuh"
/* #define B 4
#define S 128
#define H 8
#define P 32
#define N (H * P)
#define U (4 * N) */
struct dimB { enum { value = sizeB }; };
struct dimK { enum { value = sizeS }; };
struct dimJ { enum { value = sizeS }; };
struct dimH { enum { value = sizeH }; };
struct dimP { enum { value = sizeP }; };
struct dimN { enum { value = sizeI }; };
struct dimU { enum { value = sizeU }; };
struct dimQKV { enum { value = 3 }; };
using lX = metal::list<dimB, dimJ, dimN>;
using lK = metal::list<dimB, dimK, dimN>;
using lQ = metal::list<dimB, dimJ, dimN>;
using lV = metal::list<dimB, dimK, dimN>;
using lWKQV = metal::list<dimQKV, dimP, dimH, dimN>;
using lWK = metal::list<dimP, dimH, dimN>;
using lWQ = metal::list<dimP, dimH, dimN>;
using lWV = metal::list<dimP, dimH, dimN>;
using lKKQQVV = metal::list<dimQKV, dimP, dimH, dimB, dimJ>;
using lKK = metal::list<dimP, dimH, dimB, dimK>;
using lQQ = metal::list<dimP, dimH, dimB, dimJ>;
using lVV = metal::list<dimP, dimH, dimB, dimK>;
using lBETA = metal::list<dimH, dimB, dimJ, dimK>;
using lALPHA = metal::list<dimH, dimB, dimJ, dimK>;
using lGAMMA = metal::list<dimP, dimH, dimB, dimJ>;
using lWO = metal::list<dimP, dimH, dimN>;
using lATT = metal::list<dimB, dimJ, dimN>;
using lDROP1MASK = metal::list<dimB, dimJ, dimN>;
using lSB1 = metal::list<dimB, dimJ, dimN>;
using lS1 = metal::list<dimN>;
using lB1 = metal::list<dimN>;
using lLINB1 = metal::list<dimU>;
using lLINW1 = metal::list<dimU, dimN>;
using lSB1_LINW1 = metal::list<dimB, dimJ, dimU>;
using lDROP2 = metal::list<dimB, dimJ, dimU>;
using lLIN1 = metal::list<dimB, dimJ, dimU>;
using lLINB2 = metal::list<dimN>;
using lLINW2 = metal::list<dimN, dimU>;
using lDROP2_LINW2 = metal::list<dimB, dimJ, dimN>;
using lLIN2 = metal::list<dimB, dimJ, dimN>;
using lS2 = metal::list<dimN>;
using lDS2 = metal::list<dimN>;
using lB2 = metal::list<dimN>;
using lDB2 = metal::list<dimN>;
using lSB2 = metal::list<dimB, dimJ, dimN>;
using lDSB2 = metal::list<dimB, dimJ, dimN>;
using lLN2 = metal::list<dimB, dimJ, dimN>;
using lDLN2 = metal::list<dimB, dimJ, dimN>;
using lLN2STD = metal::list<dimB, dimJ>;
using lLN2DIFF = metal::list<dimB, dimJ, dimN>;
using lDROP3MASK = metal::list<dimB, dimJ, dimN>;
using lDRESID2 = metal::list<dimB, dimJ, dimN>;
using lDLIN2 = metal::list<dimB, dimJ, dimN>;
using lDDROP2 = metal::list<dimB, dimJ, dimU>;
using lDLINW2 = metal::list<dimN, dimU>;
using lDROP2MASK = metal::list<dimB, dimJ, dimU>;
using lACT = metal::list<dimB, dimJ, dimU>;
using lDLIN1 = metal::list<dimB, dimJ, dimU>;
using lDLIN1_LINW1 = metal::list<dimB, dimJ, dimN>;
using lDLINW1 = metal::list<dimU, dimN>;
using lLN1 = metal::list<dimB, dimJ, dimN>;
using lDLN1 = metal::list<dimB, dimJ, dimN>;
using lDSB1 = metal::list<dimB, dimJ, dimN>;
using lDS1 = metal::list<dimN>;
using lDB1 = metal::list<dimN>;
using lDLINB2 = metal::list<dimN>;
using lDLINB1 = metal::list<dimU>;
using lLN1STD = metal::list<dimB, dimJ>;
using lLN1DIFF = metal::list<dimB, dimJ, dimN>;
using lDRESID1 = metal::list<dimB, dimJ, dimN>;
using lDATT = metal::list<dimB, dimJ, dimN>;
using lDXATT = metal::list<dimB, dimJ, dimN>;
using lDX = metal::list<dimB, dimJ, dimN>;
using lDGAMMA = metal::list<dimP, dimH, dimB, dimJ>;
using lDVV = metal::list<dimP, dimH, dimB, dimK>;
using lDWV = metal::list<dimP, dimH, dimN>;
using lDALPHA = metal::list<dimH, dimB, dimJ, dimK>;
using lDBETA = metal::list<dimH, dimB, dimJ, dimK>;
using lDQQ = metal::list<dimP, dimH, dimB, dimJ>;
using lDWQ = metal::list<dimP, dimH, dimN>;
using lDKK = metal::list<dimP, dimH, dimB, dimK>;
using lDWK = metal::list<dimP, dimH, dimN>;
using lDKKQQVV = metal::list<dimQKV, dimP, dimH, dimB, dimJ>;
using lDWO = metal::list<dimP, dimH, dimN>;
using lBKQV = metal::list<dimQKV, dimH, dimP>;
using lWKK = metal::list<dimP, dimH, dimB, dimK>;
using lWQQ = metal::list<dimP, dimH, dimB, dimJ>;
using lWVV = metal::list<dimP, dimH, dimB, dimK>;
using lWKKself = metal::list<dimP, dimH, dimB, dimJ>;
using lWQQself = metal::list<dimP, dimH, dimB, dimJ>;
using lWVVself = metal::list<dimP, dimH, dimB, dimJ>;
using lBK = metal::list<dimH, dimP>;
using lBQ = metal::list<dimH, dimP>;
using lBV = metal::list<dimH, dimP>;
using lDBK = metal::list<dimH, dimP>;
using lDBQ = metal::list<dimH, dimP>;
using lDBV = metal::list<dimH, dimP>;
using lDKKself = metal::list<dimP, dimH, dimB, dimJ>;
using lDQQself = metal::list<dimP, dimH, dimB, dimJ>;
using lDVVself = metal::list<dimP, dimH, dimB, dimJ>;
using lKKself = metal::list<dimP, dimH, dimB, dimJ>;
using lQQself = metal::list<dimP, dimH, dimB, dimJ>;
using lVVself = metal::list<dimP, dimH, dimB, dimJ>;
using lBO = metal::list<dimN>;
using lDBO = metal::list<dimN>;
using lATTN_DROP = metal::list<dimH, dimB, dimJ, dimK>;
using lATTN_DROP_MASK = metal::list<dimH, dimB, dimJ, dimK>;
using lDATTN_DROP = metal::list<dimH, dimB, dimJ, dimK>;
using layoutsList = metal::list<ENCODER_LAYOUTS_LIST>;
using Real = half;
using Enc = Encoder<Real, dimB, dimK, dimJ, dimH, dimP, dimN, dimU, dimQKV, layoutsList>;
extern "C" Enc* init() {
return new Enc();
}
extern "C" void encoder_forward(Enc* encoder, Real* X
// weights
, Real* WKQV
, Real* BKQV
, Real* WO
, Real* BO
, Real* S1
, Real* B1
, Real* LINB1
, Real* LINW1
, Real* S2
, Real* B2
, Real* LINB2
, Real* LINW2
// interm
, Real* KKQQVV
, Real* BETA
, Real* ALPHA
, Real* ATTN_DROP_MASK
, Real* ATTN_DROP
, Real* GAMMA
, Real* ATT
, Real* DROP1MASK
, Real* SB1
, Real* SB1_LINW1
, Real* DROP2
, Real* LIN1
, Real* DROP2_LINW2
, Real* LIN2
, Real* LN2
, Real* LN2STD
, Real* LN2DIFF
, Real* DROP2MASK
, Real* DROP3MASK
, Real* LN1
, Real* ACT
, Real* LN1STD
, Real* LN1DIFF
// out
, Real* Y)
{
encoder->encoder_forward(X
, WKQV
, BKQV
, WO
, BO
, S1
, B1
, LINB1
, LINW1
, S2
, B2
, LINB2
, LINW2
, KKQQVV
, BETA
, ALPHA
, ATTN_DROP_MASK
, ATTN_DROP
, GAMMA
, ATT
, DROP1MASK
, SB1
, SB1_LINW1
, DROP2
, LIN1
, DROP2_LINW2
, LIN2
, LN2
, LN2STD
, LN2DIFF
, DROP2MASK
, DROP3MASK
, LN1
, ACT
, LN1STD
, LN1DIFF
, Y);
}
extern "C" void encoder_backward(Enc* encoder
, Real* DY
// param_gradients
, Real* DWKQV
, Real* DBKQV
, Real* DWO
, Real* DBO
, Real* DS1
, Real* DB1
, Real* DLINB1
, Real* DLINW1
, Real* DS2
, Real* DB2
, Real* DLINB2
, Real* DLINW2
// backward intermediate
, Real* DLN2
, Real* DRESID2
, Real* DLIN2
, Real* DDROP2
, Real* DLIN1
, Real* DLIN1_LINW1
, Real* DLN1
, Real* DSB1
, Real* DRESID1
, Real* DATT
, Real* DXATT
, Real* DGAMMA
, Real* DATTN_DROP
, Real* DALPHA
, Real* DBETA
, Real* DKKQQVV
// weights
, Real* WKQV
, Real* BKQV
, Real* WO
, Real* BO
, Real* S1
, Real* B1
, Real* LINB1
, Real* LINW1
, Real* S2
, Real* B2
, Real* LINB2
, Real* LINW2
// forward intermediate
, Real* KKQQVV
, Real* BETA
, Real* ALPHA
, Real* ATTN_DROP_MASK
, Real* ATTN_DROP
, Real* GAMMA
, Real* ATT
, Real* DROP1MASK
, Real* SB1
, Real* SB1_LINW1
, Real* DROP2
, Real* LIN1
, Real* DROP2_LINW2
, Real* LIN2
, Real* LN2
, Real* LN2STD
, Real* LN2DIFF
, Real* DROP2MASK
, Real* DROP3MASK
, Real* LN1
, Real* ACT
, Real* LN1STD
, Real* LN1DIFF
//
, Real* X
//
, Real* DX
) {
encoder->encoder_backward(
DY
// param_gradients
, DWKQV
, DBKQV
, DWO
, DBO
, DS1
, DB1
, DLINB1
, DLINW1
, DS2
, DB2
, DLINB2
, DLINW2
// backward intermediate
, DLN2
, DRESID2
, DLIN2
, DDROP2
, DLIN1
, DLIN1_LINW1
, DLN1
, DSB1
, DRESID1
, DATT
, DXATT
, DGAMMA
, DATTN_DROP
, DALPHA
, DBETA
, DKKQQVV
// weights
, WKQV
, BKQV
, WO
, BO
, S1
, B1
, LINB1
, LINW1
, S2
, B2
, LINB2
, LINW2
// forward intermediate
, KKQQVV
, BETA
, ALPHA
, ATTN_DROP_MASK
, ATTN_DROP
, GAMMA
, ATT
, DROP1MASK
, SB1
, SB1_LINW1
, DROP2
, LIN1
, DROP2_LINW2
, LIN2
, LN2
, LN2STD
, LN2DIFF
, DROP2MASK
, DROP3MASK
, LN1
, ACT
, LN1STD
, LN1DIFF
//
, X
//
, DX
);
}
extern "C" void destroy(Enc* encoder) {
delete encoder;
}
// extern "C" void softmax_test(double* in, double* out) {
// struct d1 { enum { value = 1 }; };
// struct d2 { enum { value = 1 }; };
// struct dv { enum { value = 4 }; };
// struct dr { enum { value = 64 }; };
// using lIN = metal::list<d1, d2, dr, dv>;
// using lOUT = metal::list<d1, d2, dr, dv>;
// Softmax<double, d1, d2, dv, dr, lIN, lOUT>::run(in, out, 0);
// cudaStreamSynchronize(0);
// }
|
6126ffc7804cfd240f0689805f2049fc595e9a7e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include "hip/hip_runtime.h"
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
__device__ int index(int col, int row, int ord){
return (row *ord)+col;
}
__global__ void Transpose(int *c, const int *a, int* blarh, int local_index){
int col = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
c[index(row,col,4)] = a[index(col, row, 4)] ;
}
int main()
{
const int arraySize = 16;
const int a[arraySize] = { 1, 2, 3, 4, 5 ,6,7,8,9,10,11,12,13,14,15,16};
int c[arraySize] = { 0 };
int *dev_a = 0;
int *dev_c = 0;
// Allocate GPU buffers for three vectors (one input, one output) .
hipMalloc((void**)&dev_c, arraySize * sizeof(int));
hipMalloc((void**)&dev_a, arraySize * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, arraySize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c, arraySize * sizeof(int), hipMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
dim3 dimgrid(2, 2);
dim3 dimblock(2, 2);
//Transpose<<<dimgrid, dimblock>>>(dev_c, dev_a);
verify_kernel(Transpose,dimgrid,dimblock,dev_c,dev_a, NULL);
// Copy output vector from GPU buffer to host memory.
hipMemcpy(c, dev_c, arraySize * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
for (int i = 0; i < arraySize; i++){
printf("%d ",c[i]);
if(i<3)
assert(c[i+1]==c[i]+4);
}
return 0;
}
|
6126ffc7804cfd240f0689805f2049fc595e9a7e.cu
|
#include <cuda.h>
//#include "cuda_runtime.h"
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
__device__ int index(int col, int row, int ord){
return (row *ord)+col;
}
__global__ void Transpose(int *c, const int *a, int* blarh, int local_index){
int col = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
c[index(row,col,4)] = a[index(col, row, 4)] ;
}
int main()
{
const int arraySize = 16;
const int a[arraySize] = { 1, 2, 3, 4, 5 ,6,7,8,9,10,11,12,13,14,15,16};
int c[arraySize] = { 0 };
int *dev_a = 0;
int *dev_c = 0;
// Allocate GPU buffers for three vectors (one input, one output) .
cudaMalloc((void**)&dev_c, arraySize * sizeof(int));
cudaMalloc((void**)&dev_a, arraySize * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c, arraySize * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
dim3 dimgrid(2, 2);
dim3 dimblock(2, 2);
//Transpose<<<dimgrid, dimblock>>>(dev_c, dev_a);
verify_kernel(Transpose,dimgrid,dimblock,dev_c,dev_a, NULL);
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(c, dev_c, arraySize * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
for (int i = 0; i < arraySize; i++){
printf("%d ",c[i]);
if(i<3)
assert(c[i+1]==c[i]+4);
}
return 0;
}
|
f6e156a22da202d97bf1393f9de6b82608f81bff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define BLOCK_DIM 16
__global__ void matrixAdd(int *a,int *b,int *c,int N);
int main(){
int curr=2;
int N=BLOCK_DIM*curr;
printf("------------------------------------------\n");
while(N<=BLOCK_DIM*16){
int a[N][N], b[N][N], gpu_sum[N][N],cpu_sum[N][N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
int size=N*N*sizeof(int);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
a[i][j]=i+j;
b[i][j]=i-j;
}
}
hipMalloc((void**) &dev_a,size);
hipMalloc((void**) &dev_b,size);
hipMalloc((void**) &dev_c,size);
hipEvent_t startinit,endinit;
hipEventCreate(&startinit);
hipEventCreate(&endinit);
hipEventRecord(startinit, 0);
hipMemcpy(dev_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size,hipMemcpyHostToDevice);
hipEventRecord(endinit, 0);
hipEventSynchronize(endinit);
hipEventElapsedTime(&timeinit, startinit, endinit);
hipEvent_t gpu_start,gpu_end;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_end);
hipEventRecord(gpu_start, 0);
dim3 dimBlock(BLOCK_DIM,BLOCK_DIM);
dim3 dimGrid((int)ceil(N/dimBlock.x),(int)ceil(N/dimBlock.y));
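        // N is always a multiple of BLOCK_DIM here (N = BLOCK_DIM * curr), so the
        // integer division inside ceil() is already exact; for arbitrary N use
        // (N + BLOCK_DIM - 1) / BLOCK_DIM instead.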
hipLaunchKernelGGL(( matrixAdd), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_a,dev_b,dev_c,N);
hipDeviceSynchronize();
hipEventRecord(gpu_end, 0);
hipEventSynchronize(gpu_end);
hipEventElapsedTime(&time_gpu, gpu_start, gpu_end);
hipEvent_t startindex,endindex;
hipEventCreate(&startindex);
hipEventCreate(&endindex);
hipEventRecord(startindex, 0);
hipMemcpy(gpu_sum,dev_c,size,hipMemcpyDeviceToHost);
hipEventRecord(endindex, 0);
hipEventSynchronize(endindex);
hipEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
cpu_sum[i][j]=a[i][j]+b[i][j];
}
}
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with %d blocks and %d threads\n",(int)ceil(N/dimBlock.x)*(int)ceil(N/dimBlock.y),BLOCK_DIM*BLOCK_DIM);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
if(gpu_sum[i][j]!=cpu_sum[i][j]){
flag=0;
break;
}
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
printf("------------------------------------------\n");
curr++;
N=BLOCK_DIM*curr;
}
}
__global__ void matrixAdd(int *a,int *b,int *c,int N){
int col=blockIdx.x*blockDim.x+threadIdx.x;
int row=blockIdx.y*blockDim.y+threadIdx.y;
int index=col+row*N;
if(col<N && row<N){
c[index]=a[index]+b[index];
}
}
|
f6e156a22da202d97bf1393f9de6b82608f81bff.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda.h>
#define BLOCK_DIM 16
__global__ void matrixAdd(int *a,int *b,int *c,int N);
int main(){
int curr=2;
int N=BLOCK_DIM*curr;
printf("------------------------------------------\n");
while(N<=BLOCK_DIM*16){
int a[N][N], b[N][N], gpu_sum[N][N],cpu_sum[N][N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
int size=N*N*sizeof(int);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
a[i][j]=i+j;
b[i][j]=i-j;
}
}
cudaMalloc((void**) &dev_a,size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
dim3 dimBlock(BLOCK_DIM,BLOCK_DIM);
dim3 dimGrid((int)ceil(N/dimBlock.x),(int)ceil(N/dimBlock.y));
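        // N is always a multiple of BLOCK_DIM here (N = BLOCK_DIM * curr), so the
        // integer division inside ceil() is already exact; for arbitrary N use
        // (N + BLOCK_DIM - 1) / BLOCK_DIM instead.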
matrixAdd<<<dimGrid,dimBlock>>>(dev_a,dev_b,dev_c,N);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_sum,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
cpu_sum[i][j]=a[i][j]+b[i][j];
}
}
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with %d blocks and %d threads\n",(int)ceil(N/dimBlock.x)*(int)ceil(N/dimBlock.y),BLOCK_DIM*BLOCK_DIM);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
if(gpu_sum[i][j]!=cpu_sum[i][j]){
flag=0;
break;
}
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
printf("------------------------------------------\n");
curr++;
N=BLOCK_DIM*curr;
}
}
__global__ void matrixAdd(int *a,int *b,int *c,int N){
int col=blockIdx.x*blockDim.x+threadIdx.x;
int row=blockIdx.y*blockDim.y+threadIdx.y;
int index=col+row*N;
if(col<N && row<N){
c[index]=a[index]+b[index];
}
}
|
816441ec4c79101a3c004855d3a746b7b7bf1120.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rectgrid_raytracing.cuh"
inline __device__ float calTMax(const float &dir, const float &interval, const int &current_index, const float &current_pos)
{
if (dir >= 0)
return float(current_index + 1)*interval - current_pos;
else
return current_pos - float(current_index)*interval;
}
template <typename T>
inline __host__ __device__ T absDivide(const T &denominator, const T &numerator)
{
if (numerator <= Epsilon && numerator >= -Epsilon)
return T(INT_MAX);
return abs(denominator / numerator);
}
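// Note: when the direction component is (near) zero, absDivide returns T(INT_MAX),
// so the corresponding tMax/tDelta becomes effectively infinite and that axis is
// never stepped by the 3DDDA traversal below.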
// 3DDDA
// Intersect with the heliostat within this rectangle grid
__device__ bool Intersect(const float3 &orig, const float3 &dir,
const int &grid_address, const float3 *d_heliostat_vertexs,
	const int *d_grid_heliostat_match, const int *d_grid_heliostat_index)
{
for (unsigned int i = d_grid_heliostat_index[grid_address]; i < d_grid_heliostat_index[grid_address + 1]; ++i)
{
unsigned int heliostat_index = 3 * d_grid_heliostat_match[i];
float u, v, t;
bool intersect = global_func::rayParallelogramIntersect(orig, dir, d_heliostat_vertexs[heliostat_index],
d_heliostat_vertexs[heliostat_index + 1],
d_heliostat_vertexs[heliostat_index + 2], t, u, v);
if (intersect)
return true;
}
return false;
}
__device__ bool NotCollision(const float3 &d_orig, const float3 &d_dir,
RectGrid &rectgrid, const float3 *d_helio_vertexs)
{
// Step 1 - Initialization
// Step 1.1 Initial current position of origin in the scene
int3 pos = make_int3((d_orig - rectgrid.pos_) / rectgrid.interval_);
// Step 1.2 StepX, StepY, StepZ
int3 Step;
Step.x = (d_dir.x >= 0) ? 1 : -1;
Step.y = (d_dir.y >= 0) ? 1 : -1;
Step.z = (d_dir.z >= 0) ? 1 : -1;
// Step 1.3 Initial tmaxX, tmaxY, tmaxZ
float3 tMax, t;
t.x = calTMax(d_dir.x, rectgrid.interval_.x, pos.x, d_orig.x - rectgrid.pos_.x);
t.y = calTMax(d_dir.y, rectgrid.interval_.y, pos.y, d_orig.y - rectgrid.pos_.y);
t.z = calTMax(d_dir.z, rectgrid.interval_.z, pos.z, d_orig.z - rectgrid.pos_.z);
tMax.x = absDivide(t.x, d_dir.x); // avoid divide 0
tMax.y = absDivide(t.y, d_dir.y);
tMax.z = absDivide(t.z, d_dir.z);
// Step 1.4 Initial tDeltaX, tDeltaY, tDeltaZ
float3 tDelta;
tDelta.x = absDivide(rectgrid.interval_.x, d_dir.x);// avoid divide 0
tDelta.y = absDivide(rectgrid.interval_.y, d_dir.y);
tDelta.z = absDivide(rectgrid.interval_.z, d_dir.z);
// Step 2 - Intersection
int3 grid_index = pos;
int grid_address = global_func::unroll_index(grid_index, rectgrid.grid_num_);
while (true)
{
if (tMax.x < tMax.y)
{
if (tMax.x < tMax.z)
{
grid_index.x += Step.x;
if (grid_index.x >= rectgrid.grid_num_.x || grid_index.x<0)
// Outside grid
return true;
tMax.x += tDelta.x;
}
else
{
grid_index.z += Step.z;
if (grid_index.z >= rectgrid.grid_num_.z || grid_index.z<0)
// Outside grid
return true;
tMax.z += tDelta.z;
}
}
else
{
if (tMax.y < tMax.z)
{
grid_index.y += Step.y;
if (grid_index.y >= rectgrid.grid_num_.y || grid_index.y<0)
// Outside grid
return true;
tMax.y += tDelta.y;
}
else
{
grid_index.z += Step.z;
if (grid_index.z >= rectgrid.grid_num_.z || grid_index.z<0)
// Outside grid
return true;
tMax.z += tDelta.z;
}
}
grid_address = global_func::unroll_index(grid_index, rectgrid.grid_num_);
if (Intersect(d_orig, d_dir,
grid_address, d_helio_vertexs, rectgrid.d_grid_helio_match_, rectgrid.d_grid_helio_index_))
return false;
}
return false;
}
// Step 3: intersect with receiver
inline __device__ float eta_aAlpha(const float &d)
{
if (d <= 1000.0f)
return 0.99331f - 0.0001176f*d + 1.97f*(1e-8f) * d*d;
else
return expf(-0.0001106f*d);
}
inline __device__ float calEnergy(const float3 &sun_dir, const float3 &normal, const float &eta)
{
float cosine = fabsf(dot(sun_dir, normal));
return cosine*eta;
}
__device__ void receiver_drawing(RectangleReceiver &receiver, const SunRay &sunray,
	const float3 &orig, const float3 &dir, const float3 &normal)
{
// Step1: Intersect with receiver
float t, u, v;
	bool intersect = receiver.GIntersect(orig, dir, t, u, v);
	if (!intersect)
		return;
int2 row_col = make_int2(u* receiver.resolution_.y, v* receiver.resolution_.x); // Intersect location
// Step2: Calculate the energy of the light
float eta = eta_aAlpha(t);
float energy = calEnergy(sunray.sun_dir_, normal, eta);
// Step3: Add the energy to the intersect position
int address = row_col.x*receiver.resolution_.x + row_col.y; //col_row.y + col_row.x*resolution.y;
atomicAdd(&(receiver.d_image_[address]), energy);
}
__global__ void map_tracing(const SunRay sunray, // sun
RectGrid grid, // grid
RectangleReceiver receiver, // receiver
const float3 *d_helio_vertexs, // 3 vertexs of heliostats
const float3 *d_microhelio_normals, // micro-heliostat's normal
const float3 *d_microhelio_center, // micro-heliostat's origins
const int *d_microhelio_start, // micro-heliostat's belonging group number
const int microhelio_num)
{
long long int myId = global_func::getThreadId();
if (myId >= microhelio_num*sunray.num_sunshape_lights_per_group_)
return;
// Step 1: whether the incident light is shadowed by other heliostats
int nLights = sunray.num_sunshape_lights_per_group_;
int nGroup = sunray.num_sunshape_groups_;
int address = (d_microhelio_start[myId / nLights] + myId%nLights)%(nLights*nGroup);
float3 dir = sunray.d_samplelights_[address]; // get the y-aligned direction
dir = global_func::local2world(dir, sunray.sun_dir_); // get the sun_direction-aligned direction
dir = -dir; // Since the sun direction is incident direction, reverse it
float3 orig = d_microhelio_center[myId / nLights]; // get the center of submirror
if (!NotCollision(orig, dir, grid, d_helio_vertexs))
return;
// Step 2: whether the reflect light is shadowed by other heliostats
float3 normal = d_microhelio_normals[myId / nLights];
int start_id = (myId / nLights - 1 > 0) ? (myId / nLights - 1) : 0;
address = (d_microhelio_start[start_id] + myId%nLights) % (nLights*nGroup);
float3 turbulence = sunray.d_perturbation_[address];
normal = global_func::local2world(turbulence, normal);
normal = normalize(normal);
dir = -dir;
dir = reflect(dir, normal); // reflect light
dir = normalize(dir);
if (!NotCollision(orig, dir, grid, d_helio_vertexs))
return;
// Step 3: intersect with receiver
receiver_drawing(receiver, sunray, orig, dir, normal);
}
|
816441ec4c79101a3c004855d3a746b7b7bf1120.cu
|
#include "rectgrid_raytracing.cuh"
inline __device__ float calTMax(const float &dir, const float &interval, const int &current_index, const float &current_pos)
{
if (dir >= 0)
return float(current_index + 1)*interval - current_pos;
else
return current_pos - float(current_index)*interval;
}
template <typename T>
inline __host__ __device__ T absDivide(const T &denominator, const T &numerator)
{
if (numerator <= Epsilon && numerator >= -Epsilon)
return T(INT_MAX);
return abs(denominator / numerator);
}
// 3DDDA
// Intersect with the heliostat within this rectangle grid
__device__ bool Intersect(const float3 &orig, const float3 &dir,
const int &grid_address, const float3 *d_heliostat_vertexs,
const int *d_grid_heliostat_match, const int *d_grid_heliostat_index)
{
for (unsigned int i = d_grid_heliostat_index[grid_address]; i < d_grid_heliostat_index[grid_address + 1]; ++i)
{
unsigned int heliostat_index = 3 * d_grid_heliostat_match[i];
float u, v, t;
bool intersect = global_func::rayParallelogramIntersect(orig, dir, d_heliostat_vertexs[heliostat_index],
d_heliostat_vertexs[heliostat_index + 1],
d_heliostat_vertexs[heliostat_index + 2], t, u, v);
if (intersect)
return true;
}
return false;
}
__device__ bool NotCollision(const float3 &d_orig, const float3 &d_dir,
RectGrid &rectgrid, const float3 *d_helio_vertexs)
{
// Step 1 - Initialization
// Step 1.1 Initial current position of origin in the scene
int3 pos = make_int3((d_orig - rectgrid.pos_) / rectgrid.interval_);
// Step 1.2 StepX, StepY, StepZ
int3 Step;
Step.x = (d_dir.x >= 0) ? 1 : -1;
Step.y = (d_dir.y >= 0) ? 1 : -1;
Step.z = (d_dir.z >= 0) ? 1 : -1;
// Step 1.3 Initial tmaxX, tmaxY, tmaxZ
float3 tMax, t;
t.x = calTMax(d_dir.x, rectgrid.interval_.x, pos.x, d_orig.x - rectgrid.pos_.x);
t.y = calTMax(d_dir.y, rectgrid.interval_.y, pos.y, d_orig.y - rectgrid.pos_.y);
t.z = calTMax(d_dir.z, rectgrid.interval_.z, pos.z, d_orig.z - rectgrid.pos_.z);
tMax.x = absDivide(t.x, d_dir.x); // avoid divide 0
tMax.y = absDivide(t.y, d_dir.y);
tMax.z = absDivide(t.z, d_dir.z);
// Step 1.4 Initial tDeltaX, tDeltaY, tDeltaZ
float3 tDelta;
tDelta.x = absDivide(rectgrid.interval_.x, d_dir.x);// avoid divide 0
tDelta.y = absDivide(rectgrid.interval_.y, d_dir.y);
tDelta.z = absDivide(rectgrid.interval_.z, d_dir.z);
// Step 2 - Intersection
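// 3D-DDA (Amanatides-Woo style) traversal: repeatedly step into the neighbouring cell along the
// axis with the smallest tMax; return true once the ray leaves the grid without hitting anything,
// and false as soon as a heliostat registered in the current cell blocks the ray.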
int3 grid_index = pos;
int grid_address = global_func::unroll_index(grid_index, rectgrid.grid_num_);
while (true)
{
if (tMax.x < tMax.y)
{
if (tMax.x < tMax.z)
{
grid_index.x += Step.x;
if (grid_index.x >= rectgrid.grid_num_.x || grid_index.x<0)
// Outside grid
return true;
tMax.x += tDelta.x;
}
else
{
grid_index.z += Step.z;
if (grid_index.z >= rectgrid.grid_num_.z || grid_index.z<0)
// Outside grid
return true;
tMax.z += tDelta.z;
}
}
else
{
if (tMax.y < tMax.z)
{
grid_index.y += Step.y;
if (grid_index.y >= rectgrid.grid_num_.y || grid_index.y<0)
// Outside grid
return true;
tMax.y += tDelta.y;
}
else
{
grid_index.z += Step.z;
if (grid_index.z >= rectgrid.grid_num_.z || grid_index.z<0)
// Outside grid
return true;
tMax.z += tDelta.z;
}
}
grid_address = global_func::unroll_index(grid_index, rectgrid.grid_num_);
if (Intersect(d_orig, d_dir,
grid_address, d_helio_vertexs, rectgrid.d_grid_helio_match_, rectgrid.d_grid_helio_index_))
return false;
}
return false;
}
// Step 3: intersect with receiver
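// eta_aAlpha: atmospheric attenuation over the travel distance d, modelled as a quadratic fit
// for d <= 1000 and an exponential decay beyond (a commonly used heliostat-field attenuation fit).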
inline __device__ float eta_aAlpha(const float &d)
{
if (d <= 1000.0f)
return 0.99331f - 0.0001176f*d + 1.97f*(1e-8f) * d*d;
else
return expf(-0.0001106f*d);
}
inline __device__ float calEnergy(const float3 &sun_dir, const float3 &normal, const float &eta)
{
float cosine = fabsf(dot(sun_dir, normal));
return cosine*eta;
}
__device__ void receiver_drawing(RectangleReceiver &receiver, const SunRay &sunray,
const float3 &orig, const float3 &dir, const float3 &normal)
{
// Step1: Intersect with receiver
float t, u, v;
bool intersect = receiver.GIntersect(orig, dir, t, u, v); // intersect with the receiver once
if (!intersect)
return;
int2 row_col = make_int2(u* receiver.resolution_.y, v* receiver.resolution_.x); // Intersect location
// Step2: Calculate the energy of the light
float eta = eta_aAlpha(t);
float energy = calEnergy(sunray.sun_dir_, normal, eta);
// Step3: Add the energy to the intersect position
int address = row_col.x*receiver.resolution_.x + row_col.y; //col_row.y + col_row.x*resolution.y;
atomicAdd(&(receiver.d_image_[address]), energy);
}
__global__ void map_tracing(const SunRay sunray, // sun
RectGrid grid, // grid
RectangleReceiver receiver, // receiver
const float3 *d_helio_vertexs, // 3 vertices of each heliostat
const float3 *d_microhelio_normals, // micro-heliostat normals
const float3 *d_microhelio_center, // micro-heliostat centers (ray origins)
const int *d_microhelio_start, // group index each micro-heliostat belongs to
const int microhelio_num)
{
long long int myId = global_func::getThreadId();
if (myId >= microhelio_num*sunray.num_sunshape_lights_per_group_)
return;
// Step 1: whether the incident light is shadowed by other heliostats
int nLights = sunray.num_sunshape_lights_per_group_;
int nGroup = sunray.num_sunshape_groups_;
int address = (d_microhelio_start[myId / nLights] + myId%nLights)%(nLights*nGroup);
float3 dir = sunray.d_samplelights_[address]; // get the y-aligned direction
dir = global_func::local2world(dir, sunray.sun_dir_); // get the sun_direction-aligned direction
dir = -dir; // the sun direction is the incident direction, so reverse it
float3 orig = d_microhelio_center[myId / nLights]; // get the center of submirror
if (!NotCollision(orig, dir, grid, d_helio_vertexs))
return;
// Step 2: whether the reflect light is shadowed by other heliostats
float3 normal = d_microhelio_normals[myId / nLights];
int start_id = (myId / nLights - 1 > 0) ? (myId / nLights - 1) : 0;
address = (d_microhelio_start[start_id] + myId%nLights) % (nLights*nGroup);
float3 turbulence = sunray.d_perturbation_[address];
normal = global_func::local2world(turbulence, normal);
normal = normalize(normal);
dir = -dir;
dir = reflect(dir, normal); // reflect light
dir = normalize(dir);
if (!NotCollision(orig, dir, grid, d_helio_vertexs))
return;
// Step 3: intersect with receiver
receiver_drawing(receiver, sunray, orig, dir, normal);
}
|
58d61ff5cb753437e0393a896b9f83f3779d8f37.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "graph.hpp"
#include <tbb/global_control.h>
#include <tbb/flow_graph.h>
struct cudaStream {
std::vector<std::vector<hipStream_t>> streams;
cudaStream(unsigned N) : streams(tf::cuda_get_num_devices()) {
for(size_t i=0; i<streams.size(); ++i) {
streams[i].resize(N);
tf::cudaScopedDevice ctx(i);
for(unsigned j=0; j<N; ++j) {
TF_CHECK_CUDA(hipStreamCreate(&streams[i][j]), "failed to create a stream on ", i);
}
}
}
~cudaStream() {
for(size_t i=0; i<streams.size(); ++i) {
tf::cudaScopedDevice ctx(i);
for(unsigned j=0; j<streams[i].size(); ++j) {
hipStreamDestroy(streams[i][j]);
}
}
}
hipStream_t per_thread_stream(int device) {
auto id = std::hash<std::thread::id>()(std::this_thread::get_id()) % streams[device].size();
return streams[device][id];
}
};
void TBB(const Graph& g, unsigned num_cpus, unsigned num_gpus) {
using namespace tbb;
using namespace tbb::flow;
tbb::global_control c(
tbb::global_control::max_allowed_parallelism, num_cpus + num_gpus
);
tbb::flow::graph G;
cudaStream streams(num_cpus + num_gpus);
std::atomic<int> counter {0};
const int N = 1000;
int* cx = new int[N];
int* cy = new int[N];
int* cz = new int[N];
int* gx = nullptr;
int* gy = nullptr;
int* gz = nullptr;
TF_CHECK_CUDA(hipMallocManaged(&gx, N*sizeof(int)), "failed at hipMallocManaged");
TF_CHECK_CUDA(hipMallocManaged(&gy, N*sizeof(int)), "failed at hipMallocManaged");
TF_CHECK_CUDA(hipMallocManaged(&gz, N*sizeof(int)), "failed at hipMallocManaged");
std::vector<std::unique_ptr<continue_node<continue_msg>>> tasks(g.num_nodes);
std::vector<size_t> indegree(g.num_nodes, 0);
auto source = std::make_unique<continue_node<continue_msg>>(
G, [](const continue_msg&){}
);
// create a task for each node
for(const auto& v : g.nodes) {
// cpu task
if(v.g == -1) {
tasks[v.v] = std::make_unique<continue_node<continue_msg>>(G,
[&](const continue_msg&){
for(int i=0; i<N; ++i) {
cz[i] = cx[i] + cy[i];
}
++counter;
}
);
}
else {
tasks[v.v] = std::make_unique<continue_node<continue_msg>>(G,
[&, tgt_device=v.g](const continue_msg&){
++counter;
int src_device = -1;
TF_CHECK_CUDA(hipGetDevice(&src_device), "get device failed");
TF_CHECK_CUDA(hipSetDevice(tgt_device), "set device failed");
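// Build a CUDA/HIP graph for this task on the target device: memset gx/gy/gz, copy cx/cy/cz
// to the device, run the add kernel, copy the results back, then instantiate and launch the
// graph on a per-thread stream and wait for it to finish.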
hipGraph_t cuda_graph;
TF_CHECK_CUDA(cudaGraphCreate(&cuda_graph, 0), "cudaGraphCreate failed");
// memset parameter
cudaMemsetParams msetp;
msetp.value = 0;
msetp.pitch = 0;
msetp.elementSize = sizeof(int); // either 1, 2, or 4
msetp.width = N;
msetp.height = 1;
// sgx
hipGraphNode_t sgx;
msetp.dst = gx;
TF_CHECK_CUDA(cudaGraphAddMemsetNode(&sgx, cuda_graph, 0, 0, &msetp), "sgx failed");
// sgy
hipGraphNode_t sgy;
msetp.dst = gy;
TF_CHECK_CUDA(cudaGraphAddMemsetNode(&sgy, cuda_graph, 0, 0, &msetp), "sgy failed");
// sgz
hipGraphNode_t sgz;
msetp.dst = gz;
TF_CHECK_CUDA(cudaGraphAddMemsetNode(&sgz, cuda_graph, 0, 0, &msetp), "sgz failed");
// copy parameter
hipMemcpy3DParms h2dp;
h2dp.srcArray = nullptr;
h2dp.srcPos = ::make_hipPos(0, 0, 0);
h2dp.dstArray = nullptr;
h2dp.dstPos = ::make_hipPos(0, 0, 0);
h2dp.extent = ::make_hipExtent(N*sizeof(int), 1, 1);
h2dp.kind = hipMemcpyDefault;
// h2d_gx
hipGraphNode_t h2d_gx;
h2dp.srcPtr = ::make_hipPitchedPtr(cx, N*sizeof(int), N, 1);
h2dp.dstPtr = ::make_hipPitchedPtr(gx, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&h2d_gx, cuda_graph, 0, 0, &h2dp), "h2d_gx failed");
// h2d_gy
hipGraphNode_t h2d_gy;
h2dp.srcPtr = ::make_hipPitchedPtr(cy, N*sizeof(int), N, 1);
h2dp.dstPtr = ::make_hipPitchedPtr(gy, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&h2d_gy, cuda_graph, 0, 0, &h2dp), "h2d_gy failed");
// h2d_gz
hipGraphNode_t h2d_gz;
h2dp.srcPtr = ::make_hipPitchedPtr(cz, N*sizeof(int), N, 1);
h2dp.dstPtr = ::make_hipPitchedPtr(gz, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&h2d_gz, cuda_graph, 0, 0, &h2dp), "h2d_gz failed");
// kernel
cudaKernelNodeParams kp;
void* arguments[4] = { (void*)(&gx), (void*)(&gy), (void*)(&gz), (void*)(&N) };
kp.func = (void*)add<int>;
kp.gridDim = (N+255)/256;
kp.blockDim = 256;
kp.sharedMemBytes = 0;
kp.kernelParams = arguments;
kp.extra = nullptr;
hipGraphNode_t kernel;
TF_CHECK_CUDA(cudaGraphAddKernelNode(&kernel, cuda_graph, 0, 0, &kp), "kernel failed");
// d2hp
hipMemcpy3DParms d2hp;
d2hp.srcArray = nullptr;
d2hp.srcPos = ::make_hipPos(0, 0, 0);
d2hp.dstArray = nullptr;
d2hp.dstPos = ::make_hipPos(0, 0, 0);
d2hp.extent = ::make_hipExtent(N*sizeof(int), 1, 1);
d2hp.kind = hipMemcpyDefault;
// d2h_gx
hipGraphNode_t d2h_gx;
d2hp.srcPtr = ::make_hipPitchedPtr(gx, N*sizeof(int), N, 1);
d2hp.dstPtr = ::make_hipPitchedPtr(cx, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&d2h_gx, cuda_graph, 0, 0, &d2hp), "d2h_gx failed");
// d2h_gy
hipGraphNode_t d2h_gy;
d2hp.srcPtr = ::make_hipPitchedPtr(gy, N*sizeof(int), N, 1);
d2hp.dstPtr = ::make_hipPitchedPtr(cy, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&d2h_gy, cuda_graph, 0, 0, &d2hp), "d2h_gy failed");
// d2h_gz
hipGraphNode_t d2h_gz;
d2hp.srcPtr = ::make_hipPitchedPtr(gz, N*sizeof(int), N, 1);
d2hp.dstPtr = ::make_hipPitchedPtr(cz, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&d2h_gz, cuda_graph, 0, 0, &d2hp), "d2h_gz failed");
// add dependency
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &sgx, &h2d_gx, 1), "sgx->h2d_gx");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &sgy, &h2d_gy, 1), "sgy->h2d_gy");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &sgz, &h2d_gz, 1), "sgz->h2d_gz");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &h2d_gx, &kernel, 1), "h2d_gx->kernel");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &h2d_gy, &kernel, 1), "h2d_gy->kernel");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &h2d_gz, &kernel, 1), "h2d_gz->kernel");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &kernel, &d2h_gx, 1), "kernel->d2h_gx");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &kernel, &d2h_gy, 1), "kernel->d2h_gy");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &kernel, &d2h_gz, 1), "kernel->d2h_gz");
// launch the graph
hipGraphExec_t exe;
auto pts = streams.per_thread_stream(tgt_device);
TF_CHECK_CUDA(hipGraphInstantiate(&exe, cuda_graph, 0, 0, 0), "inst failed");
TF_CHECK_CUDA(hipGraphLaunch(exe, pts), "failed to launch cudaGraph");
TF_CHECK_CUDA(hipStreamSynchronize(pts), "failed to sync cudaStream");
TF_CHECK_CUDA(hipGraphExecDestroy(exe), "destroy exe failed");
TF_CHECK_CUDA(hipGraphDestroy(cuda_graph), "hipGraphDestroy failed");
TF_CHECK_CUDA(hipSetDevice(src_device), "set device failed");
}
);
}
}
for(const auto& e : g.edges) {
make_edge(*tasks[e.u], *tasks[e.v]);
indegree[e.v]++;
}
for(size_t i=0; i<indegree.size(); ++i) {
if(indegree[i] == 0) {
make_edge(*source, *tasks[i]);
}
}
source->try_put(continue_msg());
G.wait_for_all();
delete [] cx;
delete [] cy;
delete [] cz;
TF_CHECK_CUDA(hipFree(gx), "failed at hipFree");
TF_CHECK_CUDA(hipFree(gy), "failed at hipFree");
TF_CHECK_CUDA(hipFree(gz), "failed at hipFree");
if(counter != g.num_nodes) {
throw std::runtime_error("wrong result");
}
}
std::chrono::microseconds measure_time_tbb(
const Graph& g, unsigned num_cpus, unsigned num_gpus
) {
auto beg = std::chrono::high_resolution_clock::now();
TBB(g, num_cpus, num_gpus);
auto end = std::chrono::high_resolution_clock::now();
return std::chrono::duration_cast<std::chrono::microseconds>(end - beg);
}
|
58d61ff5cb753437e0393a896b9f83f3779d8f37.cu
|
#include "graph.hpp"
#include <tbb/global_control.h>
#include <tbb/flow_graph.h>
struct cudaStream {
std::vector<std::vector<cudaStream_t>> streams;
cudaStream(unsigned N) : streams(tf::cuda_get_num_devices()) {
for(size_t i=0; i<streams.size(); ++i) {
streams[i].resize(N);
tf::cudaScopedDevice ctx(i);
for(unsigned j=0; j<N; ++j) {
TF_CHECK_CUDA(cudaStreamCreate(&streams[i][j]), "failed to create a stream on ", i);
}
}
}
~cudaStream() {
for(size_t i=0; i<streams.size(); ++i) {
tf::cudaScopedDevice ctx(i);
for(unsigned j=0; j<streams[i].size(); ++j) {
cudaStreamDestroy(streams[i][j]);
}
}
}
cudaStream_t per_thread_stream(int device) {
auto id = std::hash<std::thread::id>()(std::this_thread::get_id()) % streams[device].size();
return streams[device][id];
}
};
void TBB(const Graph& g, unsigned num_cpus, unsigned num_gpus) {
using namespace tbb;
using namespace tbb::flow;
tbb::global_control c(
tbb::global_control::max_allowed_parallelism, num_cpus + num_gpus
);
tbb::flow::graph G;
cudaStream streams(num_cpus + num_gpus);
std::atomic<int> counter {0};
const int N = 1000;
int* cx = new int[N];
int* cy = new int[N];
int* cz = new int[N];
int* gx = nullptr;
int* gy = nullptr;
int* gz = nullptr;
TF_CHECK_CUDA(cudaMallocManaged(&gx, N*sizeof(int)), "failed at cudaMallocManaged");
TF_CHECK_CUDA(cudaMallocManaged(&gy, N*sizeof(int)), "failed at cudaMallocManaged");
TF_CHECK_CUDA(cudaMallocManaged(&gz, N*sizeof(int)), "failed at cudaMallocManaged");
std::vector<std::unique_ptr<continue_node<continue_msg>>> tasks(g.num_nodes);
std::vector<size_t> indegree(g.num_nodes, 0);
auto source = std::make_unique<continue_node<continue_msg>>(
G, [](const continue_msg&){}
);
// create a task for each node
for(const auto& v : g.nodes) {
// cpu task
if(v.g == -1) {
tasks[v.v] = std::make_unique<continue_node<continue_msg>>(G,
[&](const continue_msg&){
for(int i=0; i<N; ++i) {
cz[i] = cx[i] + cy[i];
}
++counter;
}
);
}
else {
tasks[v.v] = std::make_unique<continue_node<continue_msg>>(G,
[&, tgt_device=v.g](const continue_msg&){
++counter;
int src_device = -1;
TF_CHECK_CUDA(cudaGetDevice(&src_device), "get device failed");
TF_CHECK_CUDA(cudaSetDevice(tgt_device), "set device failed");
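// Build a CUDA graph for this task on the target device: memset gx/gy/gz, copy cx/cy/cz
// to the device, run the add kernel, copy the results back, then instantiate and launch the
// graph on a per-thread stream and wait for it to finish.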
cudaGraph_t cuda_graph;
TF_CHECK_CUDA(cudaGraphCreate(&cuda_graph, 0), "cudaGraphCreate failed");
// memset parameter
cudaMemsetParams msetp;
msetp.value = 0;
msetp.pitch = 0;
msetp.elementSize = sizeof(int); // either 1, 2, or 4
msetp.width = N;
msetp.height = 1;
// sgx
cudaGraphNode_t sgx;
msetp.dst = gx;
TF_CHECK_CUDA(cudaGraphAddMemsetNode(&sgx, cuda_graph, 0, 0, &msetp), "sgx failed");
// sgy
cudaGraphNode_t sgy;
msetp.dst = gy;
TF_CHECK_CUDA(cudaGraphAddMemsetNode(&sgy, cuda_graph, 0, 0, &msetp), "sgy failed");
// sgz
cudaGraphNode_t sgz;
msetp.dst = gz;
TF_CHECK_CUDA(cudaGraphAddMemsetNode(&sgz, cuda_graph, 0, 0, &msetp), "sgz failed");
// copy parameter
cudaMemcpy3DParms h2dp;
h2dp.srcArray = nullptr;
h2dp.srcPos = ::make_cudaPos(0, 0, 0);
h2dp.dstArray = nullptr;
h2dp.dstPos = ::make_cudaPos(0, 0, 0);
h2dp.extent = ::make_cudaExtent(N*sizeof(int), 1, 1);
h2dp.kind = cudaMemcpyDefault;
// h2d_gx
cudaGraphNode_t h2d_gx;
h2dp.srcPtr = ::make_cudaPitchedPtr(cx, N*sizeof(int), N, 1);
h2dp.dstPtr = ::make_cudaPitchedPtr(gx, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&h2d_gx, cuda_graph, 0, 0, &h2dp), "h2d_gx failed");
// h2d_gy
cudaGraphNode_t h2d_gy;
h2dp.srcPtr = ::make_cudaPitchedPtr(cy, N*sizeof(int), N, 1);
h2dp.dstPtr = ::make_cudaPitchedPtr(gy, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&h2d_gy, cuda_graph, 0, 0, &h2dp), "h2d_gy failed");
// h2d_gz
cudaGraphNode_t h2d_gz;
h2dp.srcPtr = ::make_cudaPitchedPtr(cz, N*sizeof(int), N, 1);
h2dp.dstPtr = ::make_cudaPitchedPtr(gz, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&h2d_gz, cuda_graph, 0, 0, &h2dp), "h2d_gz failed");
// kernel
cudaKernelNodeParams kp;
void* arguments[4] = { (void*)(&gx), (void*)(&gy), (void*)(&gz), (void*)(&N) };
kp.func = (void*)add<int>;
kp.gridDim = (N+255)/256;
kp.blockDim = 256;
kp.sharedMemBytes = 0;
kp.kernelParams = arguments;
kp.extra = nullptr;
cudaGraphNode_t kernel;
TF_CHECK_CUDA(cudaGraphAddKernelNode(&kernel, cuda_graph, 0, 0, &kp), "kernel failed");
// d2hp
cudaMemcpy3DParms d2hp;
d2hp.srcArray = nullptr;
d2hp.srcPos = ::make_cudaPos(0, 0, 0);
d2hp.dstArray = nullptr;
d2hp.dstPos = ::make_cudaPos(0, 0, 0);
d2hp.extent = ::make_cudaExtent(N*sizeof(int), 1, 1);
d2hp.kind = cudaMemcpyDefault;
// d2h_gx
cudaGraphNode_t d2h_gx;
d2hp.srcPtr = ::make_cudaPitchedPtr(gx, N*sizeof(int), N, 1);
d2hp.dstPtr = ::make_cudaPitchedPtr(cx, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&d2h_gx, cuda_graph, 0, 0, &d2hp), "d2h_gx failed");
// d2h_gy
cudaGraphNode_t d2h_gy;
d2hp.srcPtr = ::make_cudaPitchedPtr(gy, N*sizeof(int), N, 1);
d2hp.dstPtr = ::make_cudaPitchedPtr(cy, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&d2h_gy, cuda_graph, 0, 0, &d2hp), "d2h_gy failed");
// d2h_gz
cudaGraphNode_t d2h_gz;
d2hp.srcPtr = ::make_cudaPitchedPtr(gz, N*sizeof(int), N, 1);
d2hp.dstPtr = ::make_cudaPitchedPtr(cz, N*sizeof(int), N, 1);
TF_CHECK_CUDA(cudaGraphAddMemcpyNode(&d2h_gz, cuda_graph, 0, 0, &d2hp), "d2h_gz failed");
// add dependency
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &sgx, &h2d_gx, 1), "sgx->h2d_gx");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &sgy, &h2d_gy, 1), "sgy->h2d_gy");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &sgz, &h2d_gz, 1), "sgz->h2d_gz");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &h2d_gx, &kernel, 1), "h2d_gx->kernel");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &h2d_gy, &kernel, 1), "h2d_gy->kernel");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &h2d_gz, &kernel, 1), "h2d_gz->kernel");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &kernel, &d2h_gx, 1), "kernel->d2h_gx");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &kernel, &d2h_gy, 1), "kernel->d2h_gy");
TF_CHECK_CUDA(cudaGraphAddDependencies(cuda_graph, &kernel, &d2h_gz, 1), "kernel->d2h_gz");
// launch the graph
cudaGraphExec_t exe;
auto pts = streams.per_thread_stream(tgt_device);
TF_CHECK_CUDA(cudaGraphInstantiate(&exe, cuda_graph, 0, 0, 0), "inst failed");
TF_CHECK_CUDA(cudaGraphLaunch(exe, pts), "failed to launch cudaGraph");
TF_CHECK_CUDA(cudaStreamSynchronize(pts), "failed to sync cudaStream");
TF_CHECK_CUDA(cudaGraphExecDestroy(exe), "destroy exe failed");
TF_CHECK_CUDA(cudaGraphDestroy(cuda_graph), "cudaGraphDestroy failed");
TF_CHECK_CUDA(cudaSetDevice(src_device), "set device failed");
}
);
}
}
for(const auto& e : g.edges) {
make_edge(*tasks[e.u], *tasks[e.v]);
indegree[e.v]++;
}
for(size_t i=0; i<indegree.size(); ++i) {
if(indegree[i] == 0) {
make_edge(*source, *tasks[i]);
}
}
source->try_put(continue_msg());
G.wait_for_all();
delete [] cx;
delete [] cy;
delete [] cz;
TF_CHECK_CUDA(cudaFree(gx), "failed at cudaFree");
TF_CHECK_CUDA(cudaFree(gy), "failed at cudaFree");
TF_CHECK_CUDA(cudaFree(gz), "failed at cudaFree");
if(counter != g.num_nodes) {
throw std::runtime_error("wrong result");
}
}
std::chrono::microseconds measure_time_tbb(
const Graph& g, unsigned num_cpus, unsigned num_gpus
) {
auto beg = std::chrono::high_resolution_clock::now();
TBB(g, num_cpus, num_gpus);
auto end = std::chrono::high_resolution_clock::now();
return std::chrono::duration_cast<std::chrono::microseconds>(end - beg);
}
|
ea39e2621aab30d27f9bb9dd11f9315b080fafab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
__device__ float arraysum(float *x, int n) {
float sum = 0;
for (int i = 0; i < n; i++) {
sum += x[i];
}
return sum;
}
__global__ void deviceburst(float *x, int n, int k, float *bigmaxs, int *startend) {
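// Each thread scans its own partition of x for a run of at least k consecutive values with a
// large mean (a burst); the best mean it finds and that run's [start, end] indices are written
// to bigmaxs[me] and startend[2*me], startend[2*me+1].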
int partition = (n - k + 1) / (blockDim.x * gridDim.x) + 1;
int me = blockIdx.x * blockDim.x + threadIdx.x;
int left = me * partition;
int left_limit = left + partition;
int length = k;
float sum = arraysum(x + left, length);
float mean = sum / length;
startend[me * 2] = left;
startend[me * 2 + 1] = left + length - 1;
bigmaxs[me] = mean;
while (left + length < n && left < left_limit) {
float next = x[left + length];
if (next > mean) {
if (next > x[left]) {
sum = sum + next - x[left];
left += 1;
} else {
sum = sum + next;
length += 1;
}
} else {
left += length - k + 1;
length = k;
sum = arraysum(x + left, length);
}
mean = sum / length;
if (mean > bigmaxs[me]) {
startend[me * 2] = left;
startend[me * 2 + 1] = left + length - 1;
bigmaxs[me] = mean;
}
}
}
int arraymaxidx(float *x, int n) {
float max = x[0];
int maxidx = 0;
for (int i = 1; i < n; i++) {
if (x[i] > max) {
max = x[i];
maxidx = i;
}
}
return maxidx;
}
void maxburst(float *x, int n, int k, int *startend, float *bigmax) {
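// Host driver: copy x to the device, launch deviceburst so each GPU thread reports its best
// burst candidate, then pick the overall winner on the host with arraymaxidx.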
int gridDimX = 128;
int blockDimX = 256 / ((k - 1) / 16000 + 1);
int threads_count = gridDimX * blockDimX;
float *device_x;
hipMalloc((void **)&device_x, sizeof(float) * n);
hipMemcpy(device_x, x, sizeof(float) * n, hipMemcpyHostToDevice);
float *device_bigmaxs;
hipMalloc((void **)&device_bigmaxs, sizeof(float) * threads_count);
int *device_startends;
hipMalloc((void **)&device_startends, sizeof(int) * threads_count * 2);
dim3 dimGrid(gridDimX, 1);
dim3 dimBlock(blockDimX, 1, 1);
hipLaunchKernelGGL(( deviceburst), dim3(dimGrid), dim3(dimBlock), 0, 0, device_x, n, k, device_bigmaxs, device_startends);
hipDeviceSynchronize();
hipFree(device_x);
float *bigmaxs = (float *)malloc(sizeof(float) * threads_count);
hipMemcpy(bigmaxs, device_bigmaxs, sizeof(float) * threads_count, hipMemcpyDeviceToHost);
hipFree(device_bigmaxs);
int *startends = (int *)malloc(sizeof(int) * threads_count * 2);
hipMemcpy(startends, device_startends, sizeof(int) * threads_count * 2, hipMemcpyDeviceToHost);
hipFree(device_startends);
int maxidx = arraymaxidx(bigmaxs, threads_count);
bigmax[0] = bigmaxs[maxidx];
startend[0] = startends[maxidx * 2];
startend[1] = startends[maxidx * 2 + 1];
}
#include <stdio.h>
#include <sys/time.h>
int main() {
int n = 450000;
int k = 4000;
float *x = (float *)malloc(sizeof(float) * n);
srand(0);
for (int i = 0; i < n; i++) {
x[i] = (float)rand() / (float)(RAND_MAX / 100.0);
}
int startend[] = {0, 0};
float bigmax = 0;
struct timeval start;
gettimeofday(&start, NULL);
maxburst(x, n, k, startend, &bigmax);
struct timeval end;
gettimeofday(&end, NULL);
float duration = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
printf("%f (from %d to %d) (%fms)\n", bigmax, startend[0], startend[1], duration);
}
|
ea39e2621aab30d27f9bb9dd11f9315b080fafab.cu
|
#include <cuda.h>
__device__ float arraysum(float *x, int n) {
float sum = 0;
for (int i = 0; i < n; i++) {
sum += x[i];
}
return sum;
}
__global__ void deviceburst(float *x, int n, int k, float *bigmaxs, int *startend) {
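// Each thread scans its own partition of x for a run of at least k consecutive values with a
// large mean (a burst); the best mean it finds and that run's [start, end] indices are written
// to bigmaxs[me] and startend[2*me], startend[2*me+1].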
int partition = (n - k + 1) / (blockDim.x * gridDim.x) + 1;
int me = blockIdx.x * blockDim.x + threadIdx.x;
int left = me * partition;
int left_limit = left + partition;
int length = k;
float sum = arraysum(x + left, length);
float mean = sum / length;
startend[me * 2] = left;
startend[me * 2 + 1] = left + length - 1;
bigmaxs[me] = mean;
while (left + length < n && left < left_limit) {
float next = x[left + length];
if (next > mean) {
if (next > x[left]) {
sum = sum + next - x[left];
left += 1;
} else {
sum = sum + next;
length += 1;
}
} else {
left += length - k + 1;
length = k;
sum = arraysum(x + left, length);
}
mean = sum / length;
if (mean > bigmaxs[me]) {
startend[me * 2] = left;
startend[me * 2 + 1] = left + length - 1;
bigmaxs[me] = mean;
}
}
}
int arraymaxidx(float *x, int n) {
float max = x[0];
int maxidx = 0;
for (int i = 1; i < n; i++) {
if (x[i] > max) {
max = x[i];
maxidx = i;
}
}
return maxidx;
}
void maxburst(float *x, int n, int k, int *startend, float *bigmax) {
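// Host driver: copy x to the device, launch deviceburst so each GPU thread reports its best
// burst candidate, then pick the overall winner on the host with arraymaxidx.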
int gridDimX = 128;
int blockDimX = 256 / ((k - 1) / 16000 + 1);
int threads_count = gridDimX * blockDimX;
float *device_x;
cudaMalloc((void **)&device_x, sizeof(float) * n);
cudaMemcpy(device_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);
float *device_bigmaxs;
cudaMalloc((void **)&device_bigmaxs, sizeof(float) * threads_count);
int *device_startends;
cudaMalloc((void **)&device_startends, sizeof(int) * threads_count * 2);
dim3 dimGrid(gridDimX, 1);
dim3 dimBlock(blockDimX, 1, 1);
deviceburst<<<dimGrid, dimBlock>>>(device_x, n, k, device_bigmaxs, device_startends);
cudaDeviceSynchronize(); // cudaThreadSynchronize is deprecated
cudaFree(device_x);
float *bigmaxs = (float *)malloc(sizeof(float) * threads_count);
cudaMemcpy(bigmaxs, device_bigmaxs, sizeof(float) * threads_count, cudaMemcpyDeviceToHost);
cudaFree(device_bigmaxs);
int *startends = (int *)malloc(sizeof(int) * threads_count * 2);
cudaMemcpy(startends, device_startends, sizeof(int) * threads_count * 2, cudaMemcpyDeviceToHost);
cudaFree(device_startends);
int maxidx = arraymaxidx(bigmaxs, threads_count);
bigmax[0] = bigmaxs[maxidx];
startend[0] = startends[maxidx * 2];
startend[1] = startends[maxidx * 2 + 1];
}
#include <stdio.h>
#include <sys/time.h>
int main() {
int n = 450000;
int k = 4000;
float *x = (float *)malloc(sizeof(float) * n);
srand(0);
for (int i = 0; i < n; i++) {
x[i] = (float)rand() / (float)(RAND_MAX / 100.0);
}
int startend[] = {0, 0};
float bigmax = 0;
struct timeval start;
gettimeofday(&start, NULL);
maxburst(x, n, k, startend, &bigmax);
struct timeval end;
gettimeofday(&end, NULL);
float duration = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
printf("%f (from %d to %d) (%fms)\n", bigmax, startend[0], startend[1], duration);
}
|
4ae2ce3e19fb21cfbbdafe52e541cfebb5f970dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "FindHaunted.h"
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
findHauntedKernel(VehicleData *vehicleData, float3 *seekVectors, unsigned int stride);
OpenSteer::FindHaunted::FindHaunted(unsigned int stride)
{
d_seekVectors = NULL;
threadsPerBlock = 128;
this->stride = stride;
}
OpenSteer::FindHaunted::~FindHaunted() {}
void OpenSteer::FindHaunted::init()
{
// device memory for seek vector
mem_size_seek_vectors = getNumberOfAgents()*sizeof(float3);
hipError_t retval = hipMalloc((void **)&d_seekVectors, mem_size_seek_vectors);
if (retval != hipSuccess)
cout << "Error while allocating d_seekVectors memory: " << hipGetErrorString(retval) << endl;
}
void OpenSteer::FindHaunted::run()
{
hipLaunchKernelGGL(( findHauntedKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, getVehicleData(), d_seekVectors, stride);
}
void OpenSteer::FindHaunted::close()
{
if (d_seekVectors != NULL) {
hipFree(d_seekVectors);
d_seekVectors = NULL;
}
}
|
4ae2ce3e19fb21cfbbdafe52e541cfebb5f970dd.cu
|
#include "FindHaunted.h"
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
findHauntedKernel(VehicleData *vehicleData, float3 *seekVectors, unsigned int stride);
OpenSteer::FindHaunted::FindHaunted(unsigned int stride)
{
d_seekVectors = NULL;
threadsPerBlock = 128;
this->stride = stride;
}
OpenSteer::FindHaunted::~FindHaunted() {}
void OpenSteer::FindHaunted::init()
{
// device memory for seek vector
mem_size_seek_vectors = getNumberOfAgents()*sizeof(float3);
cudaError_t retval = cudaMalloc((void **)&d_seekVectors, mem_size_seek_vectors);
if (retval != cudaSuccess)
cout << "Error while allocating d_seekVectors memory: " << cudaGetErrorString(retval) << endl;
}
void OpenSteer::FindHaunted::run()
{
findHauntedKernel<<<gridDim(), blockDim()>>>(getVehicleData(), d_seekVectors, stride);
}
void OpenSteer::FindHaunted::close()
{
if (d_seekVectors != NULL) {
cudaFree(d_seekVectors);
d_seekVectors = NULL;
}
}
|
a1088d8e1c7c40ce671cf9c2e8fe510c54240a17.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LocalConnect.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../layers/BranchLayer.h"
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
float* _curDelta,
float**_w,
float* _nextDelta,
int dim,
int area,
int localKernelSize);
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
__global__ void g_LocalConnect_wgrad_Add(
float** _WgradTmp,
float** Wgrad,
float** w,
int kernelSize,
int batch,
float lambda,
int wgradTmpArea,
int wgradArea,
int wArea);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
float* _inputs,
float* _curDelta,
float** _wgradTmp,
/*float** _w,*/
int dim,
int area,
int batch,
float lambda);
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
float** arrayS,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation(
float* _convDelta,
float**_w,
float* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize);
/*
*function: get convolution layer and pooling output
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(min(outputDim * outputDim, 256));
*/
__global__ void g_LocalConnect_feedforward_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
* blocks : dim3(batch, cuKernelScan[cl] * localKernelSize, Config::instance()->getChannels()),
* threads : dim3(threadidx)
*/
__global__ void g_LocalConnect_wgrad(
float* _inputs,
float* _curDelta,
float** _wgrad,
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
float lambda);
/*
* blocks : dim3(batch, cuKernelScan[cl], Config::instance()->getChannels()),
* threads : dim3(threadidx)
*/
__global__ void g_LocalConnect_wgrad_1(float** sArray,
float* convDelta,
float* WgradTmp,
int imgSize,
int convOutputSize,
int kernelAmount2,
int kernelSize,
int sArrayArea,
int convDeltaArea,
int wgrapTmpArea,
int localKernelSize);
/*
*block = dim3(localKernelSize, amount);
*thread= dim3(batch);
*
*/
__global__ void g_LocalConnect_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize);
void LocalConnect::calCost()
{
cost->gpuClear();
hipLaunchKernelGGL(( g_getCost_3), dim3(dim3(w.size())), dim3(dim3(32)), sizeof(float) * 32, 0, cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
hipStreamSynchronize(0);
getLastCudaError("LocalConnect:getCost");
}
void LocalConnect::feedforward()
{
if((kernelSize == 3 || kernelSize == 5) && inputDim >= 4 && inputDim <= 8){
dim3 block = dim3(batch, outputAmount);
const int threads = 8;
dim3 thread= dim3(threads, outputDim * outputDim);
if(outputDim == 4){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<16, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 5){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<25, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 6){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<36, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 7){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<49, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 8){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<64, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_s_2");
}
else if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
hipLaunchKernelGGL(( g_LocalConnect_feedforward_kernelSize1_2), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
inputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_kernelSize1_2");
}
else {
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, min(outputDim * outputDim, 64));
hipLaunchKernelGGL(( g_LocalConnect_feedforward_2), dim3(block), dim3(thread),
sizeof(float) * outputDim * outputDim, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
kernelSize,
outputDim,
inputs->getArea(),
outputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_2");
}
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_nonLinearity), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect::g_nonLinearity");
}
}
void LocalConnect::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_dnonLinearity), dim3(block), dim3(thread), 0, 0, curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect::g_dnonLinearity");
}
if(inputs){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
preDelta->gpuClear();
if(kernelSize == 1){
hipLaunchKernelGGL(( g_LocalConnect_backpropagation_kernelSize1), dim3(block), dim3(thread), 0, 0,
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation_kernelSize1");
}else{
hipLaunchKernelGGL(( g_LocalConnect_backpropagation), dim3(block), dim3(thread), 0, 0,
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
kernelSize,
curDelta->getArea(),
preDelta->getArea(),
localKernelSize);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation");
}
}
}
void LocalConnect::getGrad()
{
if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
hipLaunchKernelGGL(( g_LocalConnect_wgrad_kernelSize1), dim3(block), dim3(thread), sizeof(float) * batch, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
inputs->getArea(),
batch,
lambda);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_LocalConnect_wgrad_kernelSize1");
block = dim3(outputAmount, kernelSize * kernelSize);
thread = dim3(batch);
}
else{
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(9, min(outputDim * outputDim, 64));
hipLaunchKernelGGL(( g_LocalConnect_wgrad), dim3(block), dim3(thread), sizeof(float) * inputDim * inputDim, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
kernelSize,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea(),
batch,
lambda);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_LocalConnect_wgrad");
}
dim3 block = dim3(outputAmount * localKernelSize, kernelSize * kernelSize);
dim3 thread = dim3(batch);
hipLaunchKernelGGL(( g_LocalConnect_wgrad_Add), dim3(block), dim3(thread), sizeof(float) * batch, 0,
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
kernelSize,
batch,
lambda,
wgradTmp[0]->getArea(),
wgrad[0]->getArea(),
w[0]->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_LocalConnect_wgrad_Add");
block = dim3(localKernelSize, outputAmount);
thread= dim3(batch);
hipLaunchKernelGGL(( g_LocalConnect_Bgrad), dim3(block),dim3(thread),sizeof(float) * batch, 0,
curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("LocalConnect::getGrad::g_LocalConnect_Bgrad");
}
void LocalConnect::updateWeight()
{
dim3 thread = min(256, w[0]->getLen());
dim3 block = momentum_w.size();
hipLaunchKernelGGL(( g_vecAdd), dim3(block), dim3(thread), 0, 0, momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate(), Config::instance()->getLrate());
}
LocalConnect::LocalConnect(std::string name)
{
m_name = name;
ConfigLocal* config = static_cast<ConfigLocal*>(Config::instance()->getLayerByName(m_name));
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
if(inputs == NULL){
/*inputs = NULL the type must be BranchLayers*/
Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer());
Assert(config->m_subInput != std::string("NULL"));
BranchLayer* bl = static_cast<BranchLayer*>(preLayer);
inputs = bl->getSubOutput(config->m_subInput);
preDelta = bl->getSubCurDelta(config->m_subInput);
}else{
preDelta = preLayer->getCurDelta();
}
inputAmount = preLayer->outputAmount;
outputAmount = inputAmount;
kernelSize = config->m_kernelSize;
inputDim = preLayer->outputDim;
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
localKernelSize = outputDim * outputDim;
outputs = new cuMatrix<float> (batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount * localKernelSize; i++){
w.push_back(new cuMatrix<float>(kernelSize, kernelSize, 1));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(kernelSize, kernelSize, 1));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, kernelSize * kernelSize, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount * localKernelSize; i++){
momentum_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, 1));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
void LocalConnect::save(FILE* file)
{
for(int a = 0; a < (int)w.size(); a++){
w[a]->toCpu();
b[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%f ", w[a]->get(i, j, c));
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%f ", b[a]->get(i, j, c));
}
}
}
}
}
void LocalConnect::clearMomentum()
{
for(int i = 0; i < (int)momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < (int)momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
void LocalConnect::initRandom()
{
//srand(clock());
float initW = Config::instance()->getLayerByName(m_name)->m_initW;
if(Config::instance()->getLayerByName(m_name)->isGaussian()){
for(int i = 0; i < (int)w.size(); i++){
float epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
float r1 = 0.01f + 5.0f * (rand()) / RAND_MAX;
float r2 = 0.01f + 5.0f * (rand()) / RAND_MAX;
createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
kernelSize, kernelSize, w[i]->channels,
epsilon);
}
w[i]->toGpu();
}
}
else{
for(int i = 0; i < (int)w.size(); i++){
for(int j = 0; j < w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
//printf("%f ", w[i]->hostData[j]);
}//printf("\n");
w[i]->toGpu();
}
}
}
void LocalConnect::initFromCheckpoint(FILE* file)
{
float val = 0;
for(int a = 0; a < (int)w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF){
LOG("scanf fail", "result/log.txt");
}
w[a]->set(i, j, c, val);
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF){
LOG("scanf fail", "result/log.txt");
}
b[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
b[a]->toGpu();
}
}
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
float** arrayS,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ float image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int OutputSize2 = outputDim * outputDim;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
float* curInput = arrayS[sp] + k * inputSize2;
float* curOutput = _output + outputArea * k + sp * OutputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < OutputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < OutputSize2)
{
int x = tyid / outputDim;
int y = tyid % outputDim;
float val = 0.0;
float* w = arrayW[k * localKernelSize + tyid];
float b = arrayB[k * localKernelSize + tyid][0];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
curOutput[tyid] = val + b;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ float image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
float* curInput = inputs + k * inputArea + sp * inputSize2;
float* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
curOutput[id] = 0;
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
float val = 0.0;
float* w = arrayW[k * localKernelSize + tyid];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
atomicAdd(curOutput + tyid, val);
}
}
__syncthreads();
for(int i = 0; i < outputSize2; i += blockDim.y * blockDim.x)
{
int id = i + threadIdx.y * blockDim.x + threadIdx.x;
if(id < outputSize2)
{
float b = arrayB[k * localKernelSize + id][0];
curOutput[id] += b;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize)
{
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = dim * dim;
int inputSize2 = dim * dim;
float* curInput = inputs + k * area + sp * inputSize2;
float* curOutput = _output + k * area + sp * outputSize2;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.x)
{
int tyid = ty + threadIdx.x;
if(tyid < outputSize2)
{
int skip = k * localKernelSize + tyid;
float val = 0.0;
float w = arrayW[skip][0];
float b = arrayB[skip][0];
val = curInput[tyid] * w + b;
curOutput[tyid] = val ;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
2<64, 9, 8, 8, 64>
*/
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
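// Specialization for small output maps: the input tile is cached in shared memory and each
// output pixel's kernel window is split across THREADS threads whose partial sums are
// combined with a shared-memory tree reduction (convSum).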
__shared__ float image[OUTPUTDIM2];
int sp = blockIdx.x;
int k = blockIdx.y;
__shared__ float convSum[OUTPUTDIM2][THREADS];
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
float* curInput = inputs + k * inputArea + sp * inputSize2;
float* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
float val = 0.0;
float* w = arrayW[k * localKernelSize + tyid];
float* _convSum = convSum[threadIdx.y];
float b = arrayB[k * localKernelSize + tyid][0];
_convSum[threadIdx.x] = 0;
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
_convSum[threadIdx.x] = val;
__syncthreads();
#pragma unroll
for(int len = THREADS; len != 1; len = (len + 1) >> 1){
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) _convSum[threadIdx.x] += _convSum[threadIdx.x + skip];
__syncthreads();
}
if(threadIdx.x == 0)
curOutput[tyid] = _convSum[0] + b;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
float* _curDelta,
float**_w,
float* _nextDelta,
int dim,
int area,
int localKernelSize)
{
int s = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + s * dim2;
float* curDelta = _curDelta + skip;
float* nxtDelta = _nextDelta + skip;
for (int tidx = 0; tidx < dim2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < dim2) {
float val = 0.0;
float w = _w[k * localKernelSize + idx][0];
val = curDelta[idx] * w;
nxtDelta[idx] = val;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation (
float* _convDelta,
float**_w,
float* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize)
{
int curSize = _convOutputSize;
int wSize = _kernelSize;
int nxtSize = _poolOutputSize;
int s = blockIdx.x;
int k = blockIdx.y;
int curSize2 = curSize * curSize;
int nxtSize2 = nxtSize * nxtSize;
float* curDelta = _convDelta + k * _convDeltaArea + s * curSize2;
float* nxtDelta = _poolDelta + k * _poolDeltaArea + s * nxtSize2;
int half = wSize >> 1;
for (int tidx = 0; tidx < nxtSize2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < nxtSize2) {
int i = idx / nxtSize;
int j = idx % nxtSize;
float val = 0.0;
for (int x = 0; x < wSize; x++) {
for (int y = 0; y < wSize; y++) {
int cx = i + (half - x);
int cy = j + (half - y);
int wx = x;
int wy = y;
if(cx >= 0 && cx < curSize && cy >= 0 && cy < curSize){
float* w = _w[k * localKernelSize + cx * curSize + cy];
val += curDelta[cx * curSize + cy] * w[wx * wSize + wy];
}
}
}
nxtDelta[idx] = val;
}
}
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
float* _inputs,
float* _curDelta,
float** _wgradTmp,
int dim,
int area,
int batch,
float lambda)
{
int b = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + b * dim2;
float* input = _inputs + skip;
float* curDelta = _curDelta + skip;
for(int y = 0; y < dim2; y += blockDim.x){
int yid = y + threadIdx.x;
if(yid < dim2){
skip = k * dim2 + yid;
float val = input[yid] * curDelta[yid];
//_wgradTmp[skip][0] = val / batch + lambda * _w[skip][0];
_wgradTmp[skip][0] = val;
}
}
}
/*
*dim3 block = dim3(batch, outputAmount);
*dim3 thread= min(9, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_wgrad(
float* _inputs,
float* _curDelta,
float** _wgradTmp,
/*float** _w,*/
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
float lambda)
{
int sp = blockIdx.x;
int k = blockIdx.y;
extern __shared__ float image[];
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
int kernelSize2 = kernelSize * kernelSize;
float* input = _inputs + k * inputArea + sp * inputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = input[id];
}
}
__syncthreads();
float* curDelta = _curDelta + k * curDeltaAea + sp * curDeltaSize2;
int half = (kernelSize >> 1);
for(int y = 0; y < curDeltaSize2; y += blockDim.y){
int yid = y + threadIdx.y;
if(yid < curDeltaSize2){
int ox = yid / curDeltaDim;
int oy = yid % curDeltaDim;
float* wgrad = _wgradTmp[k * curDeltaSize2 + yid] + sp * kernelSize2;
float delta = curDelta[yid];
for(int x = 0; x < kernelSize2; x+= blockDim.x){
int xid = x + threadIdx.x;
if(xid < kernelSize2){
int i = xid / kernelSize;
int j = xid % kernelSize;
int rox = ox + i - half;
int roy = oy + j - half;
if(rox >= 0 && rox < inputDim && roy >=0 && roy < inputDim){
float val = image[rox * inputDim + roy] * delta;
wgrad[xid] = val;
}else{
wgrad[xid] = 0;
}
}
}
}
}
}
/*
*block = dim3(localKernelSize, amount)
*thread= dim3(batch)
*/
__global__ void g_LocalConnect_Bgrad(float* _delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize)
{
extern __shared__ float _sum[];
int local = blockIdx.x;
int k = blockIdx.y;
int sp = threadIdx.x;
int deltaSize2 = deltaSize * deltaSize;
float delta = _delta[k * deltaArea + sp * deltaSize2 + local];
_sum[sp] = delta;
__syncthreads();
int len = batch;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(threadIdx.x == 0)
{
bgrad[k * localKernelSize + local][0] = _sum[0] / batch;
}
}
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
__global__ void g_LocalConnect_wgrad_Add(
float** _WgradTmp,
float** Wgrad,
float** w,
int kernelSize,
int batch,
float lambda,
int wgradTmpArea,
int wgradArea,
int wArea)
{
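// Sum the per-sample gradients in wgradTmp over the batch with a shared-memory tree reduction,
// then write the batch-averaged gradient plus the weight-decay term to Wgrad.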
extern __shared__ float _sum[];
int ok = blockIdx.x;
int kid = blockIdx.y;
int tid = threadIdx.x;
_sum[threadIdx.x] = 0;
__syncthreads();
int tlen = batch;
float* wgradTmp = _WgradTmp[ok];
int kernelSize2 = kernelSize * kernelSize;
for(int i = 0; i < tlen; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < tlen)
{
_sum[threadIdx.x] += wgradTmp[b * kernelSize2 + kid];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < (len >> 1))
{
_sum[tid] += _sum[tid + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(tid == 0)
{
Wgrad[ok][kid] = _sum[0] / batch + w[ok][kid] * lambda;
}
}
|
a1088d8e1c7c40ce671cf9c2e8fe510c54240a17.cu
|
#include "LocalConnect.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../layers/BranchLayer.h"
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
float* _curDelta,
float**_w,
float* _nextDelta,
int dim,
int area,
int localKernelSize);
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
__global__ void g_LocalConnect_wgrad_Add(
float** _WgradTmp,
float** Wgrad,
float** w,
int kernelSize,
int batch,
float lambda,
int wgradTmpArea,
int wgradArea,
int wArea);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
float* _inputs,
float* _curDelta,
float** _wgradTmp,
/*float** _w,*/
int dim,
int area,
int batch,
float lambda);
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
float** arrayS,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
 * function: compute the locally-connected layer output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation(
float* _convDelta,
float**_w,
float* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize);
/*
*function: compute the locally-connected layer output
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(8, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
 * blocks : dim3(batch, outputAmount),
 * threads : min(9, min(outputDim * outputDim, 64))
*/
__global__ void g_LocalConnect_wgrad(
float* _inputs,
float* _curDelta,
float** _wgrad,
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
float lambda);
/*
* blocks : dim3(batch, cuKernelScan[cl], Config::instance()->getChannels()),
* threads : dim3(threadidx)
*/
__global__ void g_LocalConnect_wgrad_1(float** sArray,
float* convDelta,
float* WgradTmp,
int imgSize,
int convOutputSize,
int kernelAmount2,
int kernelSize,
int sArrayArea,
int convDeltaArea,
int wgrapTmpArea,
int localKernelSize);
/*
*block = dim3(localKernelSize, amount);
*thread= dim3(batch);
*
*/
__global__ void g_LocalConnect_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize);
void LocalConnect::calCost()
{
cost->gpuClear();
g_getCost_3<<<dim3(w.size()), dim3(32), sizeof(float) * 32>>>(cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
cudaStreamSynchronize(0);
getLastCudaError("LocalConnect:getCost");
}
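/*
 * Note on feedforward(): it dispatches to one of three kernels depending on
 * kernelSize and the spatial size:
 *  - g_LocalConnect_feedforward_s_2<OUTPUTDIM2, THREADS> for small maps
 *    (inputDim 4..8) with kernelSize 3 or 5, using a compile-time output size
 *    and an in-block reduction over THREADS partial sums;
 *  - g_LocalConnect_feedforward_kernelSize1_2 for 1x1 kernels, a per-pixel
 *    scale-and-bias;
 *  - g_LocalConnect_feedforward_2 as the generic path with the image staged
 *    in shared memory.
 * The optional g_nonLinearity pass is then applied in place on the outputs.
 */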
void LocalConnect::feedforward()
{
if((kernelSize == 3 || kernelSize == 5) && inputDim >= 4 && inputDim <= 8){
dim3 block = dim3(batch, outputAmount);
const int threads = 8;
dim3 thread= dim3(threads, outputDim * outputDim);
if(outputDim == 4){
g_LocalConnect_feedforward_s_2<16, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 5){
g_LocalConnect_feedforward_s_2<25, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 6){
g_LocalConnect_feedforward_s_2<36, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 7){
g_LocalConnect_feedforward_s_2<49, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 8){
g_LocalConnect_feedforward_s_2<64, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_s_2");
}
else if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
g_LocalConnect_feedforward_kernelSize1_2<<<block, thread>>>(
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
inputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_kernelSize1_2");
}
else {
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, min(outputDim * outputDim, 64));
g_LocalConnect_feedforward_2<<<block, thread,
sizeof(float) * outputDim * outputDim>>>
(inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
kernelSize,
outputDim,
inputs->getArea(),
outputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_2");
}
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
g_nonLinearity<<<block, thread>>>(
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect::g_nonLinearity");
}
}
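/*
 * Note on backpropagation(): when NON_LINEARITY >= 0 the activation
 * derivative is applied to curDelta in place, then curDelta is propagated to
 * preDelta through the per-position weights. kernelSize == 1 reduces to an
 * element-wise multiply by w; otherwise g_LocalConnect_backpropagation
 * accumulates curDelta from every output position whose receptive field
 * covers the input pixel.
 */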
void LocalConnect::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
g_dnonLinearity<<<block, thread>>>(curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect::g_dnonLinearity");
}
if(inputs){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
preDelta->gpuClear();
if(kernelSize == 1){
g_LocalConnect_backpropagation_kernelSize1<<<block, thread>>>(
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation_kernelSize1");
}else{
g_LocalConnect_backpropagation<<<block, thread>>>(
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
kernelSize,
curDelta->getArea(),
preDelta->getArea(),
localKernelSize);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation");
}
}
}
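/*
 * Note on getGrad(): the gradient is computed in two stages. A per-sample
 * kernel writes wgradTmp = input_patch * delta for every output position,
 * then g_LocalConnect_wgrad_Add reduces over the batch and adds the L2 term:
 *   Wgrad = (1/batch) * sum_b wgradTmp_b + lambda * w.
 * Bias gradients are the batch mean of the per-position deltas
 * (g_LocalConnect_Bgrad).
 */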
void LocalConnect::getGrad()
{
if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
g_LocalConnect_wgrad_kernelSize1<<<block, thread, sizeof(float) * batch>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
inputs->getArea(),
batch,
lambda);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_LocalConnect_wgrad_kernelSize1");
block = dim3(outputAmount, kernelSize * kernelSize);
thread = dim3(batch);
}
else{
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(9, min(outputDim * outputDim, 64));
g_LocalConnect_wgrad<<<block, thread, sizeof(float) * inputDim * inputDim>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
kernelSize,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea(),
batch,
lambda);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_LocalConnect_wgrad");
}
dim3 block = dim3(outputAmount * localKernelSize, kernelSize * kernelSize);
dim3 thread = dim3(batch);
g_LocalConnect_wgrad_Add<<<block, thread, sizeof(float) * batch>>>(
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
kernelSize,
batch,
lambda,
wgradTmp[0]->getArea(),
wgrad[0]->getArea(),
w[0]->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_LocalConnect_wgrad_Add");
block = dim3(localKernelSize, outputAmount);
thread= dim3(batch);
g_LocalConnect_Bgrad<<<block,thread,sizeof(float) * batch>>>
(curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("LocalConnect::getGrad::g_LocalConnect_Bgrad");
}
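/*
 * Note on updateWeight(): a momentum-SGD style step for both w and b via
 * g_vecAdd (../common/cuBase.h), driven by the configured momentum and
 * learning rate. The exact update rule lives in g_vecAdd and is not shown in
 * this file.
 */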
void LocalConnect::updateWeight()
{
dim3 thread = min(256, w[0]->getLen());
dim3 block = momentum_w.size();
g_vecAdd<<<block, thread>>>(momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate(), Config::instance()->getLrate());
}
LocalConnect::LocalConnect(std::string name)
{
m_name = name;
ConfigLocal* config = static_cast<ConfigLocal*>(Config::instance()->getLayerByName(m_name));
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
if(inputs == NULL){
/*inputs = NULL the type must be BranchLayers*/
Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer());
Assert(config->m_subInput != std::string("NULL"));
BranchLayer* bl = static_cast<BranchLayer*>(preLayer);
inputs = bl->getSubOutput(config->m_subInput);
preDelta = bl->getSubCurDelta(config->m_subInput);
}else{
preDelta = preLayer->getCurDelta();
}
inputAmount = preLayer->outputAmount;
outputAmount = inputAmount;
kernelSize = config->m_kernelSize;
inputDim = preLayer->outputDim;
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
localKernelSize = outputDim * outputDim;
outputs = new cuMatrix<float> (batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount * localKernelSize; i++){
w.push_back(new cuMatrix<float>(kernelSize, kernelSize, 1));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(kernelSize, kernelSize, 1));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, kernelSize * kernelSize, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount * localKernelSize; i++){
momentum_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, 1));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
void LocalConnect::save(FILE* file)
{
for(int a = 0; a < (int)w.size(); a++){
w[a]->toCpu();
b[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%f ", w[a]->get(i, j, c));
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%f ", b[a]->get(i, j, c));
}
}
}
}
}
void LocalConnect::clearMomentum()
{
for(int i = 0; i < (int)momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < (int)momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
void LocalConnect::initRandom()
{
//srand(clock());
float initW = Config::instance()->getLayerByName(m_name)->m_initW;
if(Config::instance()->getLayerByName(m_name)->isGaussian()){
for(int i = 0; i < (int)w.size(); i++){
float epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
float r1 = 0.01f + 5.0f * (rand()) / RAND_MAX;
float r2 = 0.01f + 5.0f * (rand()) / RAND_MAX;
createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
kernelSize, kernelSize, w[i]->channels,
epsilon);
}
w[i]->toGpu();
}
}
else{
for(int i = 0; i < (int)w.size(); i++){
for(int j = 0; j < w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
//printf("%f ", w[i]->hostData[j]);
}//printf("\n");
w[i]->toGpu();
}
}
}
void LocalConnect::initFromCheckpoint(FILE* file)
{
float val = 0;
for(int a = 0; a < (int)w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF){
LOG("scanf fail", "result/log.txt");
}
w[a]->set(i, j, c, val);
}
}
}
for(int c = 0; c < b[a]->channels; c++){
		for(int i = 0; i < b[a]->rows; i++){ /* use the bias dimensions, matching save() */
			for(int j = 0; j < b[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF){
LOG("scanf fail", "result/log.txt");
}
b[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
b[a]->toGpu();
}
}
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
float** arrayS,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ float image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int OutputSize2 = outputDim * outputDim;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
float* curInput = arrayS[sp] + k * inputSize2;
float* curOutput = _output + outputArea * k + sp * OutputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < OutputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < OutputSize2)
{
int x = tyid / outputDim;
int y = tyid % outputDim;
float val = 0.0;
float* w = arrayW[k * localKernelSize + tyid];
float b = arrayB[k * localKernelSize + tyid][0];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
curOutput[tyid] = val + b;
}
}
}
/*
 * function: compute the locally-connected layer output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ float image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
float* curInput = inputs + k * inputArea + sp * inputSize2;
float* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
curOutput[id] = 0;
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
float val = 0.0;
float* w = arrayW[k * localKernelSize + tyid];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
atomicAdd(curOutput + tyid, val);
}
}
__syncthreads();
for(int i = 0; i < outputSize2; i += blockDim.y * blockDim.x)
{
int id = i + threadIdx.y * blockDim.x + threadIdx.x;
if(id < outputSize2)
{
float b = arrayB[k * localKernelSize + id][0];
curOutput[id] += b;
}
}
}
/*
 * function: compute the locally-connected layer output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize)
{
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = dim * dim;
int inputSize2 = dim * dim;
float* curInput = inputs + k * area + sp * inputSize2;
float* curOutput = _output + k * area + sp * outputSize2;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.x)
{
int tyid = ty + threadIdx.x;
if(tyid < outputSize2)
{
int skip = k * localKernelSize + tyid;
float val = 0.0;
float w = arrayW[skip][0];
float b = arrayB[skip][0];
val = curInput[tyid] * w + b;
curOutput[tyid] = val ;
}
}
}
/*
 * function: compute the locally-connected layer output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
 * (template arguments <OUTPUTDIM2, THREADS>, e.g. <64, 8> for outputDim = 8)
*/
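/*
 * Note: each thread row (threadIdx.y) owns one output position; the
 * kernelSize^2 products are split over THREADS threads in x and combined with
 * a shared-memory tree reduction in convSum before the bias is added.
 * OUTPUTDIM2 must be >= inputDim * inputDim, which holds here because
 * inputDim == outputDim for this layer.
 */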
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
float* inputs,
float** arrayW,
float** arrayB,
float* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
__shared__ float image[OUTPUTDIM2];
int sp = blockIdx.x;
int k = blockIdx.y;
__shared__ float convSum[OUTPUTDIM2][THREADS];
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
float* curInput = inputs + k * inputArea + sp * inputSize2;
float* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
float val = 0.0;
float* w = arrayW[k * localKernelSize + tyid];
float* _convSum = convSum[threadIdx.y];
float b = arrayB[k * localKernelSize + tyid][0];
_convSum[threadIdx.x] = 0;
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
_convSum[threadIdx.x] = val;
__syncthreads();
#pragma unroll
for(int len = THREADS; len != 1; len = (len + 1) >> 1){
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) _convSum[threadIdx.x] += _convSum[threadIdx.x + skip];
__syncthreads();
}
if(threadIdx.x == 0)
curOutput[tyid] = _convSum[0] + b;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
float* _curDelta,
float**_w,
float* _nextDelta,
int dim,
int area,
int localKernelSize)
{
int s = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + s * dim2;
float* curDelta = _curDelta + skip;
float* nxtDelta = _nextDelta + skip;
for (int tidx = 0; tidx < dim2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < dim2) {
float val = 0.0;
float w = _w[k * localKernelSize + idx][0];
val = curDelta[idx] * w;
nxtDelta[idx] = val;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation (
float* _convDelta,
float**_w,
float* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize)
{
int curSize = _convOutputSize;
int wSize = _kernelSize;
int nxtSize = _poolOutputSize;
int s = blockIdx.x;
int k = blockIdx.y;
int curSize2 = curSize * curSize;
int nxtSize2 = nxtSize * nxtSize;
float* curDelta = _convDelta + k * _convDeltaArea + s * curSize2;
float* nxtDelta = _poolDelta + k * _poolDeltaArea + s * nxtSize2;
int half = wSize >> 1;
for (int tidx = 0; tidx < nxtSize2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < nxtSize2) {
int i = idx / nxtSize;
int j = idx % nxtSize;
float val = 0.0;
for (int x = 0; x < wSize; x++) {
for (int y = 0; y < wSize; y++) {
int cx = i + (half - x);
int cy = j + (half - y);
int wx = x;
int wy = y;
if(cx >= 0 && cx < curSize && cy >= 0 && cy < curSize){
float* w = _w[k * localKernelSize + cx * curSize + cy];
val += curDelta[cx * curSize + cy] * w[wx * wSize + wy];
}
}
}
nxtDelta[idx] = val;
}
}
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
float* _inputs,
float* _curDelta,
float** _wgradTmp,
int dim,
int area,
int batch,
float lambda)
{
int b = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + b * dim2;
float* input = _inputs + skip;
float* curDelta = _curDelta + skip;
for(int y = 0; y < dim2; y += blockDim.x){
int yid = y + threadIdx.x;
if(yid < dim2){
skip = k * dim2 + yid;
float val = input[yid] * curDelta[yid];
//_wgradTmp[skip][0] = val / batch + lambda * _w[skip][0];
_wgradTmp[skip][0] = val;
}
}
}
/*
*dim3 block = dim3(batch, outputAmount);
*dim3 thread= min(9, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_wgrad(
float* _inputs,
float* _curDelta,
float** _wgradTmp,
/*float** _w,*/
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
float lambda)
{
int sp = blockIdx.x;
int k = blockIdx.y;
extern __shared__ float image[];
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
int kernelSize2 = kernelSize * kernelSize;
float* input = _inputs + k * inputArea + sp * inputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = input[id];
}
}
__syncthreads();
float* curDelta = _curDelta + k * curDeltaAea + sp * curDeltaSize2;
int half = (kernelSize >> 1);
for(int y = 0; y < curDeltaSize2; y += blockDim.y){
int yid = y + threadIdx.y;
if(yid < curDeltaSize2){
int ox = yid / curDeltaDim;
int oy = yid % curDeltaDim;
float* wgrad = _wgradTmp[k * curDeltaSize2 + yid] + sp * kernelSize2;
float delta = curDelta[yid];
for(int x = 0; x < kernelSize2; x+= blockDim.x){
int xid = x + threadIdx.x;
if(xid < kernelSize2){
int i = xid / kernelSize;
int j = xid % kernelSize;
int rox = ox + i - half;
int roy = oy + j - half;
if(rox >= 0 && rox < inputDim && roy >=0 && roy < inputDim){
float val = image[rox * inputDim + roy] * delta;
wgrad[xid] = val;
}else{
wgrad[xid] = 0;
}
}
}
}
}
}
/*
*block = dim3(localKernelSize, amount)
*thread= dim3(batch)
*/
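/*
 * One block per (output position, feature map): each thread loads the delta
 * of one sample and a shared-memory tree reduction averages over the batch,
 *   bgrad[k * localKernelSize + local] = (1/batch) * sum_sp delta[sp].
 */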
__global__ void g_LocalConnect_Bgrad(float* _delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize)
{
extern __shared__ float _sum[];
int local = blockIdx.x;
int k = blockIdx.y;
int sp = threadIdx.x;
int deltaSize2 = deltaSize * deltaSize;
float delta = _delta[k * deltaArea + sp * deltaSize2 + local];
_sum[sp] = delta;
__syncthreads();
int len = batch;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(threadIdx.x == 0)
{
bgrad[k * localKernelSize + local][0] = _sum[0] / batch;
}
}
/*
 * block = dim3(outputAmount * localKernelSize, kernelSize * kernelSize);
* thread= dim3(batch);
*/
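/*
 * Reduces the per-sample gradients produced by g_LocalConnect_wgrad /
 * g_LocalConnect_wgrad_kernelSize1 over the batch and applies weight decay:
 *   Wgrad[ok][kid] = (1/batch) * sum_b wgradTmp[ok][b * kernelSize^2 + kid]
 *                    + lambda * w[ok][kid].
 */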
__global__ void g_LocalConnect_wgrad_Add(
float** _WgradTmp,
float** Wgrad,
float** w,
int kernelSize,
int batch,
float lambda,
int wgradTmpArea,
int wgradArea,
int wArea)
{
extern __shared__ float _sum[];
int ok = blockIdx.x;
int kid = blockIdx.y;
int tid = threadIdx.x;
_sum[threadIdx.x] = 0;
__syncthreads();
int tlen = batch;
float* wgradTmp = _WgradTmp[ok];
int kernelSize2 = kernelSize * kernelSize;
for(int i = 0; i < tlen; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < tlen)
{
_sum[threadIdx.x] += wgradTmp[b * kernelSize2 + kid];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < (len >> 1))
{
_sum[tid] += _sum[tid + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(tid == 0)
{
Wgrad[ok][kid] = _sum[0] / batch + w[ok][kid] * lambda;
}
}
|
d85b318230e070b19525f8b0a205c60cb753cdc8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ACMP.h"
__device__ void sort_small(float *d, const int n)
{
int j;
for (int i = 1; i < n; i++) {
float tmp = d[i];
for (j = i; j >= 1 && tmp < d[j-1]; j--)
d[j] = d[j-1];
d[j] = tmp;
}
}
__device__ void sort_small_weighted(float *d, float *w, int n)
{
int j;
for (int i = 1; i < n; i++) {
float tmp = d[i];
float tmp_w = w[i];
for (j = i; j >= 1 && tmp < d[j - 1]; j--) {
d[j] = d[j - 1];
w[j] = w[j - 1];
}
d[j] = tmp;
w[j] = tmp_w;
}
}
__device__ int FindMinCostIndex(const float *costs, const int n)
{
float min_cost = costs[0];
int min_cost_idx = 0;
for (int idx = 1; idx < n; ++idx) {
if (costs[idx] <= min_cost) {
min_cost = costs[idx];
min_cost_idx = idx;
}
}
return min_cost_idx;
}
__device__ int FindMaxCostIndex(const float *costs, const int n)
{
float max_cost = costs[0];
int max_cost_idx = 0;
for (int idx = 1; idx < n; ++idx) {
if (costs[idx] >= max_cost) {
max_cost = costs[idx];
max_cost_idx = idx;
}
}
return max_cost_idx;
}
__device__ void setBit(unsigned int &input, const unsigned int n)
{
input |= (unsigned int)(1 << n);
}
__device__ int isSet(unsigned int input, const unsigned int n)
{
return (input >> n) & 1;
}
__device__ void Mat33DotVec3(const float mat[9], const float4 vec, float4 *result)
{
result->x = mat[0] * vec.x + mat[1] * vec.y + mat[2] * vec.z;
result->y = mat[3] * vec.x + mat[4] * vec.y + mat[5] * vec.z;
result->z = mat[6] * vec.x + mat[7] * vec.y + mat[8] * vec.z;
}
__device__ float Vec3DotVec3(const float4 vec1, const float4 vec2)
{
return vec1.x * vec2.x + vec1.y * vec2.y + vec1.z * vec2.z;
}
__device__ void NormalizeVec3 (float4 *vec)
{
const float normSquared = vec->x * vec->x + vec->y * vec->y + vec->z * vec->z;
const float inverse_sqrt = rsqrtf (normSquared);
vec->x *= inverse_sqrt;
vec->y *= inverse_sqrt;
vec->z *= inverse_sqrt;
}
__device__ void TransformPDFToCDF(float* probs, const int num_probs)
{
float prob_sum = 0.0f;
for (int i = 0; i < num_probs; ++i) {
prob_sum += probs[i];
}
const float inv_prob_sum = 1.0f / prob_sum;
float cum_prob = 0.0f;
for (int i = 0; i < num_probs; ++i) {
const float prob = probs[i] * inv_prob_sum;
cum_prob += prob;
probs[i] = cum_prob;
}
}
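/*
 * Note: TransformPDFToCDF normalizes the (unnormalized) per-view sampling
 * weights in place and replaces them with cumulative sums, e.g.
 * {1, 3, 1} -> {0.2, 0.8, 1.0}, so a uniform random number in [0, 1) can be
 * mapped to a view index by finding the first entry that exceeds it (see the
 * sampling loop in CheckerboardPropagation).
 */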
__device__ void Get3DPoint(const Camera camera, const int2 p, const float depth, float *X)
{
X[0] = depth * (p.x - camera.K[2]) / camera.K[0];
X[1] = depth * (p.y - camera.K[5]) / camera.K[4];
X[2] = depth;
}
__device__ float4 GetViewDirection(const Camera camera, const int2 p, const float depth)
{
float X[3];
Get3DPoint(camera, p, depth, X);
float norm = sqrt(X[0] * X[0] + X[1] * X[1] + X[2] * X[2]);
float4 view_direction;
view_direction.x = X[0] / norm;
view_direction.y = X[1] / norm;
view_direction.z = X[2] / norm;
view_direction.w = 0;
return view_direction;
}
__device__ float GetDistance2Origin(const Camera camera, const int2 p, const float depth, const float4 normal)
{
float X[3];
Get3DPoint(camera, p, depth, X);
return -(normal.x * X[0] + normal.y * X[1] + normal.z * X[2]);
}
__device__ float ComputeDepthfromPlaneHypothesis(const Camera camera, const float4 plane_hypothesis, const int2 p)
{
return -plane_hypothesis.w * camera.K[0] / ((p.x - camera.K[2]) * plane_hypothesis.x + (camera.K[0] / camera.K[4]) * (p.y - camera.K[5]) * plane_hypothesis.y + camera.K[0] * plane_hypothesis.z);
}
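/*
 * Plane parameterization used throughout: a hypothesis stores a unit normal
 * n = (x, y, z) and an offset w such that a 3D point X on the plane satisfies
 * dot(n, X) + w = 0 (see GetDistance2Origin). ComputeDepthfromPlaneHypothesis
 * intersects the viewing ray of pixel p with that plane; with
 * X = depth * K^-1 * (p, 1) this gives
 *   depth = -w * K[0] / (n.x * (p.x - K[2]) + n.y * (K[0]/K[4]) * (p.y - K[5]) + n.z * K[0]).
 */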
__device__ float4 GenerateRandomNormal(const Camera camera, const int2 p, hiprandState_t *rand_state, const float depth)
{
float4 normal;
float q1 = 1.0f;
float q2 = 1.0f;
float s = 2.0f;
while (s >= 1.0f) {
q1 = 2.0f * hiprand_uniform(rand_state) -1.0f;
q2 = 2.0f * hiprand_uniform(rand_state) - 1.0f;
s = q1 * q1 + q2 * q2;
}
const float sq = sqrt(1.0f - s);
normal.x = 2.0f * q1 * sq;
normal.y = 2.0f * q2 * sq;
normal.z = 1.0f - 2.0f * s;
normal.w = 0;
float4 view_direction = GetViewDirection(camera, p, depth);
float dot_product = normal.x * view_direction.x + normal.y * view_direction.y + normal.z * view_direction.z;
if (dot_product > 0.0f) {
normal.x = -normal.x;
normal.y = -normal.y;
normal.z = - normal.z;
}
NormalizeVec3(&normal);
return normal;
}
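/*
 * GenerateRandomNormal draws a uniformly distributed unit vector with
 * Marsaglia's rejection method (sample (q1, q2) in the unit disc, then map to
 * the sphere) and flips it so that it faces the camera, i.e.
 * dot(normal, view_direction) <= 0 for the viewing ray through p at the given
 * depth.
 */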
__device__ float4 GeneratePerturbedNormal(const Camera camera, const int2 p, const float4 normal, hiprandState_t *rand_state, const float perturbation)
{
float4 view_direction = GetViewDirection(camera, p, 1.0f);
const float a1 = (hiprand_uniform(rand_state) - 0.5f) * perturbation;
const float a2 = (hiprand_uniform(rand_state) - 0.5f) * perturbation;
const float a3 = (hiprand_uniform(rand_state) - 0.5f) * perturbation;
const float sin_a1 = sin(a1);
const float sin_a2 = sin(a2);
const float sin_a3 = sin(a3);
const float cos_a1 = cos(a1);
const float cos_a2 = cos(a2);
const float cos_a3 = cos(a3);
float R[9];
R[0] = cos_a2 * cos_a3;
R[1] = cos_a3 * sin_a1 * sin_a2 - cos_a1 * sin_a3;
R[2] = sin_a1 * sin_a3 + cos_a1 * cos_a3 * sin_a2;
R[3] = cos_a2 * sin_a3;
R[4] = cos_a1 * cos_a3 + sin_a1 * sin_a2 * sin_a3;
R[5] = cos_a1 * sin_a2 * sin_a3 - cos_a3 * sin_a1;
R[6] = -sin_a2;
R[7] = cos_a2 * sin_a1;
R[8] = cos_a1 * cos_a2;
float4 normal_perturbed;
Mat33DotVec3(R, normal, &normal_perturbed);
if (Vec3DotVec3(normal_perturbed, view_direction) >= 0.0f) {
normal_perturbed = normal;
}
NormalizeVec3(&normal_perturbed);
return normal_perturbed;
}
__device__ float4 GenerateRandomPlaneHypothesis(const Camera camera, const int2 p, hiprandState_t *rand_state, const float depth_min, const float depth_max)
{
float depth = hiprand_uniform(rand_state) * (depth_max - depth_min) + depth_min;
float4 plane_hypothesis = GenerateRandomNormal(camera, p, rand_state, depth);
plane_hypothesis.w = GetDistance2Origin(camera, p, depth, plane_hypothesis);
return plane_hypothesis;
}
__device__ float4 GeneratePertubedPlaneHypothesis(const Camera camera, const int2 p, hiprandState_t *rand_state, const float perturbation, const float4 plane_hypothesis_now, const float depth_now, const float depth_min, const float depth_max)
{
float depth_perturbed = depth_now;
float dist_perturbed = plane_hypothesis_now.w;
const float dist_min_perturbed = (1 - perturbation) * dist_perturbed;
const float dist_max_perturbed = (1 + perturbation) * dist_perturbed;
float4 plane_hypothesis_temp = plane_hypothesis_now;
do {
dist_perturbed = hiprand_uniform(rand_state) * (dist_max_perturbed - dist_min_perturbed) + dist_min_perturbed;
plane_hypothesis_temp.w = dist_perturbed;
depth_perturbed = ComputeDepthfromPlaneHypothesis(camera, plane_hypothesis_temp, p);
    } while (depth_perturbed < depth_min || depth_perturbed > depth_max); // resample until the perturbed depth lies inside [depth_min, depth_max]
float4 plane_hypothesis = GeneratePerturbedNormal(camera, p, plane_hypothesis_now, rand_state, perturbation * M_PI);
plane_hypothesis.w = dist_perturbed;
return plane_hypothesis;
}
__device__ void ComputeHomography(const Camera ref_camera, const Camera src_camera, const float4 plane_hypothesis, float *H)
{
float ref_C[3];
float src_C[3];
ref_C[0] = -(ref_camera.R[0] * ref_camera.t[0] + ref_camera.R[3] * ref_camera.t[1] + ref_camera.R[6] * ref_camera.t[2]);
ref_C[1] = -(ref_camera.R[1] * ref_camera.t[0] + ref_camera.R[4] * ref_camera.t[1] + ref_camera.R[7] * ref_camera.t[2]);
ref_C[2] = -(ref_camera.R[2] * ref_camera.t[0] + ref_camera.R[5] * ref_camera.t[1] + ref_camera.R[8] * ref_camera.t[2]);
src_C[0] = -(src_camera.R[0] * src_camera.t[0] + src_camera.R[3] * src_camera.t[1] + src_camera.R[6] * src_camera.t[2]);
src_C[1] = -(src_camera.R[1] * src_camera.t[0] + src_camera.R[4] * src_camera.t[1] + src_camera.R[7] * src_camera.t[2]);
src_C[2] = -(src_camera.R[2] * src_camera.t[0] + src_camera.R[5] * src_camera.t[1] + src_camera.R[8] * src_camera.t[2]);
float R_relative[9];
float C_relative[3];
float t_relative[3];
R_relative[0] = src_camera.R[0] * ref_camera.R[0] + src_camera.R[1] * ref_camera.R[1] + src_camera.R[2] *ref_camera.R[2];
R_relative[1] = src_camera.R[0] * ref_camera.R[3] + src_camera.R[1] * ref_camera.R[4] + src_camera.R[2] *ref_camera.R[5];
R_relative[2] = src_camera.R[0] * ref_camera.R[6] + src_camera.R[1] * ref_camera.R[7] + src_camera.R[2] *ref_camera.R[8];
R_relative[3] = src_camera.R[3] * ref_camera.R[0] + src_camera.R[4] * ref_camera.R[1] + src_camera.R[5] *ref_camera.R[2];
R_relative[4] = src_camera.R[3] * ref_camera.R[3] + src_camera.R[4] * ref_camera.R[4] + src_camera.R[5] *ref_camera.R[5];
R_relative[5] = src_camera.R[3] * ref_camera.R[6] + src_camera.R[4] * ref_camera.R[7] + src_camera.R[5] *ref_camera.R[8];
R_relative[6] = src_camera.R[6] * ref_camera.R[0] + src_camera.R[7] * ref_camera.R[1] + src_camera.R[8] *ref_camera.R[2];
R_relative[7] = src_camera.R[6] * ref_camera.R[3] + src_camera.R[7] * ref_camera.R[4] + src_camera.R[8] *ref_camera.R[5];
R_relative[8] = src_camera.R[6] * ref_camera.R[6] + src_camera.R[7] * ref_camera.R[7] + src_camera.R[8] *ref_camera.R[8];
C_relative[0] = (ref_C[0] - src_C[0]);
C_relative[1] = (ref_C[1] - src_C[1]);
C_relative[2] = (ref_C[2] - src_C[2]);
t_relative[0] = src_camera.R[0] * C_relative[0] + src_camera.R[1] * C_relative[1] + src_camera.R[2] * C_relative[2];
t_relative[1] = src_camera.R[3] * C_relative[0] + src_camera.R[4] * C_relative[1] + src_camera.R[5] * C_relative[2];
t_relative[2] = src_camera.R[6] * C_relative[0] + src_camera.R[7] * C_relative[1] + src_camera.R[8] * C_relative[2];
H[0] = R_relative[0] - t_relative[0] * plane_hypothesis.x / plane_hypothesis.w;
H[1] = R_relative[1] - t_relative[0] * plane_hypothesis.y / plane_hypothesis.w;
H[2] = R_relative[2] - t_relative[0] * plane_hypothesis.z / plane_hypothesis.w;
H[3] = R_relative[3] - t_relative[1] * plane_hypothesis.x / plane_hypothesis.w;
H[4] = R_relative[4] - t_relative[1] * plane_hypothesis.y / plane_hypothesis.w;
H[5] = R_relative[5] - t_relative[1] * plane_hypothesis.z / plane_hypothesis.w;
H[6] = R_relative[6] - t_relative[2] * plane_hypothesis.x / plane_hypothesis.w;
H[7] = R_relative[7] - t_relative[2] * plane_hypothesis.y / plane_hypothesis.w;
H[8] = R_relative[8] - t_relative[2] * plane_hypothesis.z / plane_hypothesis.w;
float tmp[9];
tmp[0] = H[0] / ref_camera.K[0];
tmp[1] = H[1] / ref_camera.K[4];
tmp[2] = -H[0] * ref_camera.K[2] / ref_camera.K[0] - H[1] * ref_camera.K[5] / ref_camera.K[4] + H[2];
tmp[3] = H[3] / ref_camera.K[0];
tmp[4] = H[4] / ref_camera.K[4];
tmp[5] = -H[3] * ref_camera.K[2] / ref_camera.K[0] - H[4] * ref_camera.K[5] / ref_camera.K[4] + H[5];
tmp[6] = H[6] / ref_camera.K[0];
tmp[7] = H[7] / ref_camera.K[4];
tmp[8] = -H[6] * ref_camera.K[2] / ref_camera.K[0] - H[7] * ref_camera.K[5] / ref_camera.K[4] + H[8];
H[0] = src_camera.K[0] * tmp[0] + src_camera.K[2] * tmp[6];
H[1] = src_camera.K[0] * tmp[1] + src_camera.K[2] * tmp[7];
H[2] = src_camera.K[0] * tmp[2] + src_camera.K[2] * tmp[8];
H[3] = src_camera.K[4] * tmp[3] + src_camera.K[5] * tmp[6];
H[4] = src_camera.K[4] * tmp[4] + src_camera.K[5] * tmp[7];
H[5] = src_camera.K[4] * tmp[5] + src_camera.K[5] * tmp[8];
H[6] = src_camera.K[8] * tmp[6];
H[7] = src_camera.K[8] * tmp[7];
H[8] = src_camera.K[8] * tmp[8];
}
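/*
 * ComputeHomography builds the plane-induced homography between the reference
 * and source views. With relative pose (R, t) and the plane written as
 * n^T X + w = 0 in the reference frame, the mapping is
 *   H = K_src * (R - t * n^T / w) * K_ref^{-1},
 * assembled here without an explicit matrix inversion (the K_ref^{-1} factors
 * are expanded in the tmp[] block).
 */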
__device__ float2 ComputeCorrespondingPoint(const float *H, const int2 p)
{
float3 pt;
pt.x = H[0] * p.x + H[1] * p.y + H[2];
pt.y = H[3] * p.x + H[4] * p.y + H[5];
pt.z = H[6] * p.x + H[7] * p.y + H[8];
return make_float2(pt.x / pt.z, pt.y / pt.z);
}
__device__ float4 TransformNormal(const Camera camera, float4 plane_hypothesis)
{
float4 transformed_normal;
transformed_normal.x = camera.R[0] * plane_hypothesis.x + camera.R[3] * plane_hypothesis.y + camera.R[6] * plane_hypothesis.z;
transformed_normal.y = camera.R[1] * plane_hypothesis.x + camera.R[4] * plane_hypothesis.y + camera.R[7] * plane_hypothesis.z;
transformed_normal.z = camera.R[2] * plane_hypothesis.x + camera.R[5] * plane_hypothesis.y + camera.R[8] * plane_hypothesis.z;
transformed_normal.w = plane_hypothesis.w;
return transformed_normal;
}
__device__ float4 TransformNormal2RefCam(const Camera camera, float4 plane_hypothesis)
{
float4 transformed_normal;
transformed_normal.x = camera.R[0] * plane_hypothesis.x + camera.R[1] * plane_hypothesis.y + camera.R[2] * plane_hypothesis.z;
transformed_normal.y = camera.R[3] * plane_hypothesis.x + camera.R[4] * plane_hypothesis.y + camera.R[5] * plane_hypothesis.z;
transformed_normal.z = camera.R[6] * plane_hypothesis.x + camera.R[7] * plane_hypothesis.y + camera.R[8] * plane_hypothesis.z;
transformed_normal.w = plane_hypothesis.w;
return transformed_normal;
}
__device__ float ComputeBilateralWeight(const float x_dist, const float y_dist, const float pix, const float center_pix, const float sigma_spatial, const float sigma_color)
{
const float spatial_dist = sqrt(x_dist * x_dist + y_dist * y_dist);
const float color_dist = fabs(pix - center_pix);
return exp(-spatial_dist / (2.0f * sigma_spatial* sigma_spatial) - color_dist / (2.0f * sigma_color * sigma_color));
}
__device__ float ComputeBilateralNCC(const hipTextureObject_t ref_image, const Camera ref_camera, const hipTextureObject_t src_image, const Camera src_camera, const int2 p, const float4 plane_hypothesis, const PatchMatchParams params)
{
const float cost_max = 2.0f;
int radius = params.patch_size / 2;
float H[9];
ComputeHomography(ref_camera, src_camera, plane_hypothesis, H);
float2 pt = ComputeCorrespondingPoint(H, p);
if (pt.x >= src_camera.width || pt.x < 0.0f || pt.y >= src_camera.height || pt.y < 0.0f) {
return cost_max;
}
float cost = 0.0f;
{
float sum_ref = 0.0f;
float sum_ref_ref = 0.0f;
float sum_src = 0.0f;
float sum_src_src = 0.0f;
float sum_ref_src = 0.0f;
float bilateral_weight_sum = 0.0f;
const float ref_center_pix = tex2D<float>(ref_image, p.x + 0.5f, p.y + 0.5f);
for (int i = -radius; i < radius + 1; i += params.radius_increment) {
float sum_ref_row = 0.0f;
float sum_src_row = 0.0f;
float sum_ref_ref_row = 0.0f;
float sum_src_src_row = 0.0f;
float sum_ref_src_row = 0.0f;
float bilateral_weight_sum_row = 0.0f;
for (int j = -radius; j < radius + 1; j += params.radius_increment) {
const int2 ref_pt = make_int2(p.x + i, p.y + j);
const float ref_pix = tex2D<float>(ref_image, ref_pt.x + 0.5f, ref_pt.y + 0.5f);
float2 src_pt = ComputeCorrespondingPoint(H, ref_pt);
const float src_pix = tex2D<float>(src_image, src_pt.x + 0.5f, src_pt.y + 0.5f);
float weight = ComputeBilateralWeight(i, j, ref_pix, ref_center_pix, params.sigma_spatial, params.sigma_color);
sum_ref_row += weight * ref_pix;
sum_ref_ref_row += weight * ref_pix * ref_pix;
sum_src_row += weight * src_pix;
sum_src_src_row += weight * src_pix * src_pix;
sum_ref_src_row += weight * ref_pix * src_pix;
bilateral_weight_sum_row += weight;
}
sum_ref += sum_ref_row;
sum_ref_ref += sum_ref_ref_row;
sum_src += sum_src_row;
sum_src_src += sum_src_src_row;
sum_ref_src += sum_ref_src_row;
bilateral_weight_sum += bilateral_weight_sum_row;
}
const float inv_bilateral_weight_sum = 1.0f / bilateral_weight_sum;
sum_ref *= inv_bilateral_weight_sum;
sum_ref_ref *= inv_bilateral_weight_sum;
sum_src *= inv_bilateral_weight_sum;
sum_src_src *= inv_bilateral_weight_sum;
sum_ref_src *= inv_bilateral_weight_sum;
const float var_ref = sum_ref_ref - sum_ref * sum_ref;
const float var_src = sum_src_src - sum_src * sum_src;
const float kMinVar = 1e-5f;
if (var_ref < kMinVar || var_src < kMinVar) {
return cost = cost_max;
} else {
const float covar_src_ref = sum_ref_src - sum_ref * sum_src;
const float var_ref_src = sqrt(var_ref * var_src);
return cost = max(0.0f, min(cost_max, 1.0f - covar_src_ref / var_ref_src));
}
}
}
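/*
 * ComputeBilateralNCC scores a plane hypothesis by warping the reference
 * patch into the source view through the induced homography and computing a
 * bilaterally weighted NCC. The returned cost is 1 - NCC clamped to
 * [0, cost_max]; degenerate (near-constant) patches and out-of-bounds
 * projections return cost_max = 2.
 */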
__device__ float ComputeMultiViewInitialCostandSelectedViews(const hipTextureObject_t *images, const Camera *cameras, const int2 p, const float4 plane_hypothesis, unsigned int *selected_views, const PatchMatchParams params)
{
float cost_max = 2.0f;
float cost_vector[32] = {2.0f};
float cost_vector_copy[32] = {2.0f};
int cost_count = 0;
int num_valid_views = 0;
for (int i = 1; i < params.num_images; ++i) {
float c = ComputeBilateralNCC(images[0], cameras[0], images[i], cameras[i], p, plane_hypothesis, params);
cost_vector[i - 1] = c;
cost_vector_copy[i - 1] = c;
cost_count++;
if (c < cost_max) {
num_valid_views++;
}
}
sort_small(cost_vector, cost_count);
*selected_views = 0;
int top_k = min(num_valid_views, params.top_k);
if (top_k > 0) {
float cost = 0.0f;
for (int i = 0; i < top_k; ++i) {
cost += cost_vector[i];
}
float cost_threshold = cost_vector[top_k - 1];
for (int i = 0; i < params.num_images - 1; ++i) {
if (cost_vector_copy[i] <= cost_threshold) {
setBit(*selected_views, i);
}
}
return cost / top_k;
} else {
return cost_max;
}
}
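/*
 * The initial multi-view cost averages the top_k cheapest per-view NCC costs
 * and records the views whose cost is no worse than the k-th best in the
 * selected_views bitmask (one bit per source image, see setBit/isSet).
 */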
__device__ void ComputeMultiViewCostVector(const hipTextureObject_t *images, const Camera *cameras, const int2 p, const float4 plane_hypothesis, float *cost_vector, const PatchMatchParams params)
{
for (int i = 1; i < params.num_images; ++i) {
cost_vector[i - 1] = ComputeBilateralNCC(images[0], cameras[0], images[i], cameras[i], p, plane_hypothesis, params);
}
}
__device__ float3 Get3DPointonWorld_cu(const float x, const float y, const float depth, const Camera camera)
{
float3 pointX;
float3 tmpX;
// Reprojection
pointX.x = depth * (x - camera.K[2]) / camera.K[0];
pointX.y = depth * (y - camera.K[5]) / camera.K[4];
pointX.z = depth;
// Rotation
tmpX.x = camera.R[0] * pointX.x + camera.R[3] * pointX.y + camera.R[6] * pointX.z;
tmpX.y = camera.R[1] * pointX.x + camera.R[4] * pointX.y + camera.R[7] * pointX.z;
tmpX.z = camera.R[2] * pointX.x + camera.R[5] * pointX.y + camera.R[8] * pointX.z;
// Transformation
float3 C;
C.x = -(camera.R[0] * camera.t[0] + camera.R[3] * camera.t[1] + camera.R[6] * camera.t[2]);
C.y = -(camera.R[1] * camera.t[0] + camera.R[4] * camera.t[1] + camera.R[7] * camera.t[2]);
C.z = -(camera.R[2] * camera.t[0] + camera.R[5] * camera.t[1] + camera.R[8] * camera.t[2]);
pointX.x = tmpX.x + C.x;
pointX.y = tmpX.y + C.y;
pointX.z = tmpX.z + C.z;
return pointX;
}
__device__ void ProjectonCamera_cu(const float3 PointX, const Camera camera, float2 &point, float &depth)
{
float3 tmp;
tmp.x = camera.R[0] * PointX.x + camera.R[1] * PointX.y + camera.R[2] * PointX.z + camera.t[0];
tmp.y = camera.R[3] * PointX.x + camera.R[4] * PointX.y + camera.R[5] * PointX.z + camera.t[1];
tmp.z = camera.R[6] * PointX.x + camera.R[7] * PointX.y + camera.R[8] * PointX.z + camera.t[2];
depth = camera.K[6] * tmp.x + camera.K[7] * tmp.y + camera.K[8] * tmp.z;
point.x = (camera.K[0] * tmp.x + camera.K[1] * tmp.y + camera.K[2] * tmp.z) / depth;
point.y = (camera.K[3] * tmp.x + camera.K[4] * tmp.y + camera.K[5] * tmp.z) / depth;
}
__device__ float ComputeGeomConsistencyCost(const hipTextureObject_t depth_image, const Camera ref_camera, const Camera src_camera, const float4 plane_hypothesis, const int2 p)
{
const float max_cost = 5.0f;
float depth = ComputeDepthfromPlaneHypothesis(ref_camera, plane_hypothesis, p);
float3 forward_point = Get3DPointonWorld_cu(p.x, p.y, depth, ref_camera);
float2 src_pt;
float src_d;
ProjectonCamera_cu(forward_point, src_camera, src_pt, src_d);
const float src_depth = tex2D<float>(depth_image, (int)src_pt.x + 0.5f, (int)src_pt.y + 0.5f);
if (src_depth == 0.0f) {
return max_cost;
}
float3 src_3D_pt = Get3DPointonWorld_cu(src_pt.x, src_pt.y, src_depth, src_camera);
float2 backward_point;
float ref_d;
ProjectonCamera_cu(src_3D_pt, ref_camera, backward_point, ref_d);
const float diff_col = p.x - backward_point.x;
const float diff_row = p.y - backward_point.y;
return min(max_cost, sqrt(diff_col * diff_col + diff_row * diff_row));
}
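/*
 * Geometric consistency: the pixel is lifted to 3D with the hypothesized
 * depth, projected into the source view, lifted again with the source depth
 * map and projected back; the cost is the forward-backward reprojection error
 * in pixels, capped at max_cost = 5 (also returned when the source depth is
 * missing).
 */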
__global__ void RandomInitialization(cudaTextureObjects *texture_objects, Camera *cameras, float4 *plane_hypotheses, float *costs, hiprandState_t *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const PatchMatchParams params)
{
const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
int width = cameras[0].width;
int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
hiprand_init(clock64(), p.y, p.x, &rand_states[center]);
if (params.geom_consistency) {
float4 plane_hypothesis = plane_hypotheses[center];
plane_hypothesis = TransformNormal2RefCam(cameras[0], plane_hypothesis);
float depth = plane_hypothesis.w;
plane_hypothesis.w = GetDistance2Origin(cameras[0], p, depth, plane_hypothesis);
plane_hypotheses[center] = plane_hypothesis;
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
else if (params.planar_prior) {
if (plane_masks[center] > 0 && costs[center] >= 0.1f) {
float perturbation = 0.02f;
float4 plane_hypothesis = prior_planes[center];
float depth_perturbed = plane_hypothesis.w;
const float depth_min_perturbed = (1 - 3 * perturbation) * depth_perturbed;
const float depth_max_perturbed = (1 + 3 * perturbation) * depth_perturbed;
depth_perturbed = hiprand_uniform(&rand_states[center]) * (depth_max_perturbed - depth_min_perturbed) + depth_min_perturbed;
float4 plane_hypothesis_perturbed = GeneratePerturbedNormal(cameras[0], p, plane_hypothesis, &rand_states[center], 3 * perturbation * M_PI);
plane_hypothesis_perturbed.w = depth_perturbed;
plane_hypotheses[center] = plane_hypothesis_perturbed;
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
else {
float4 plane_hypothesis = plane_hypotheses[center];
float depth = plane_hypothesis.w;
plane_hypothesis.w = GetDistance2Origin(cameras[0], p, depth, plane_hypothesis);
plane_hypotheses[center] = plane_hypothesis;
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
}
else {
plane_hypotheses[center] = GenerateRandomPlaneHypothesis(cameras[0], p, &rand_states[center], params.depth_min, params.depth_max);
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
}
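/*
 * RandomInitialization has three modes: with geom_consistency it reuses the
 * previous pass's plane (converting its stored depth into a distance to the
 * origin), with planar_prior it perturbs the plane stored in prior_planes for
 * masked pixels whose current cost is still high, and otherwise it draws a
 * fully random depth/normal hypothesis per pixel. In every mode the initial
 * multi-view cost and the selected-view bitmask are (re)computed.
 */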
__device__ void PlaneHypothesisRefinement(const hipTextureObject_t *images, const hipTextureObject_t *depth_images, const Camera *cameras, float4 *plane_hypothesis, float *depth, float *cost, hiprandState_t *rand_state, const float *view_weights, const float weight_norm, float4 *prior_planes, unsigned int *plane_masks, float *restricted_cost, const int2 p, const PatchMatchParams params)
{
float perturbation = 0.02f;
const int center = p.y * cameras[0].width + p.x;
float gamma = 0.5f;
float depth_sigma = (params.depth_max - params.depth_min) / 64.0f;
float two_depth_sigma_squared = 2 * depth_sigma * depth_sigma;
float angle_sigma = M_PI * (5.0f / 180.0f);
float two_angle_sigma_squared = 2 * angle_sigma * angle_sigma;
float beta = 0.18f;
float depth_prior = 0.0f;
float depth_rand;
float4 plane_hypothesis_rand;
if (params.planar_prior && plane_masks[center] > 0) {
depth_prior = ComputeDepthfromPlaneHypothesis(cameras[0], prior_planes[center], p);
depth_rand = hiprand_uniform(rand_state) * 6 * depth_sigma + (depth_prior - 3 * depth_sigma);
plane_hypothesis_rand = GeneratePerturbedNormal(cameras[0], p, prior_planes[center], rand_state, angle_sigma);
}
else {
depth_rand = hiprand_uniform(rand_state) * (params.depth_max - params.depth_min) + params.depth_min;
plane_hypothesis_rand = GenerateRandomNormal(cameras[0], p, rand_state, *depth);
}
float depth_perturbed = *depth;
const float depth_min_perturbed = (1 - perturbation) * depth_perturbed;
const float depth_max_perturbed = (1 + perturbation) * depth_perturbed;
do {
depth_perturbed = hiprand_uniform(rand_state) * (depth_max_perturbed - depth_min_perturbed) + depth_min_perturbed;
    } while (depth_perturbed < params.depth_min || depth_perturbed > params.depth_max); // resample until the perturbed depth lies inside [depth_min, depth_max]
float4 plane_hypothesis_perturbed = GeneratePerturbedNormal(cameras[0], p, *plane_hypothesis, rand_state, perturbation * M_PI); // GeneratePertubedPlaneHypothesis(cameras[0], p, rand_state, perturbation, *plane_hypothesis, *depth, params.depth_min, params.depth_max);
const int num_planes = 5;
float depths[num_planes] = {depth_rand, *depth, depth_rand, *depth, depth_perturbed};
float4 normals[num_planes] = {*plane_hypothesis, plane_hypothesis_rand, plane_hypothesis_rand, plane_hypothesis_perturbed, *plane_hypothesis};
for (int i = 0; i < num_planes; ++i) {
float cost_vector[32] = {2.0f};
float4 temp_plane_hypothesis = normals[i];
temp_plane_hypothesis.w = GetDistance2Origin(cameras[0], p, depths[i], temp_plane_hypothesis); // dists[i];
ComputeMultiViewCostVector(images, cameras, p, temp_plane_hypothesis, cost_vector, params);
float temp_cost = 0.0f;
for (int j = 0; j < params.num_images - 1; ++j) {
if (view_weights[j] > 0) {
if (params.geom_consistency) {
temp_cost += view_weights[j] * (cost_vector[j] + 0.1f * ComputeGeomConsistencyCost(depth_images[j+1], cameras[0], cameras[j+1], temp_plane_hypothesis, p));
}
else {
temp_cost += view_weights[j] * cost_vector[j];
}
}
}
temp_cost /= weight_norm;
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], temp_plane_hypothesis, p);
if (params.planar_prior && plane_masks[center] > 0) {
float depth_diff = depths[i] - depth_prior;
float angle_cos = Vec3DotVec3(prior_planes[center], temp_plane_hypothesis);
float angle_diff = acos(angle_cos);
float prior = gamma + exp(- depth_diff * depth_diff / two_depth_sigma_squared) * exp(- angle_diff * angle_diff / two_angle_sigma_squared);
float restricted_temp_cost = exp(-temp_cost * temp_cost / beta) * prior;
if (depth_before >= params.depth_min && depth_before <= params.depth_max && restricted_temp_cost > *restricted_cost) {
*depth = depth_before;
*plane_hypothesis = temp_plane_hypothesis;
*cost = temp_cost;
*restricted_cost = restricted_temp_cost;
}
}
else {
if (depth_before >= params.depth_min && depth_before <= params.depth_max && temp_cost < *cost) {
*depth = depth_before;
*plane_hypothesis = temp_plane_hypothesis;
*cost = temp_cost;
}
}
}
}
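/*
 * Refinement evaluates five candidate (depth, normal) pairs built by
 * combining the current, randomly sampled and perturbed depths and normals,
 * re-using the per-view weights from view selection; with planar_prior the
 * candidates are scored by the prior-weighted likelihood
 * exp(-cost^2 / beta) * prior instead of the raw cost.
 */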
__device__ void CheckerboardPropagation(const hipTextureObject_t *images, const hipTextureObject_t *depths, const Camera *cameras, float4 *plane_hypotheses, float *costs, hiprandState_t *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const int2 p, const PatchMatchParams params, const int iter)
{
int width = cameras[0].width;
int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
int left_near = center - 1;
int left_far = center - 3;
int right_near = center + 1;
int right_far = center + 3;
int up_near = center - width;
int up_far = center - 3 * width;
int down_near = center + width;
int down_far = center + 3 * width;
// Adaptive Checkerboard Sampling
float cost_array[8][32] = {2.0f};
// 0 -- up_near, 1 -- up_far, 2 -- down_near, 3 -- down_far, 4 -- left_near, 5 -- left_far, 6 -- right_near, 7 -- right_far
bool flag[8] = {false};
int num_valid_pixels = 0;
float costMin;
int costMinPoint;
// up_far
if (p.y > 2) {
flag[1] = true;
num_valid_pixels++;
costMin = costs[up_far];
costMinPoint = up_far;
for (int i = 1; i < 11; ++i) {
if (p.y > 2 + 2 * i) {
int pointTemp = up_far - 2 * i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
up_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[up_far], cost_array[1], params);
}
    // down_far
if (p.y < height - 3) {
flag[3] = true;
num_valid_pixels++;
costMin = costs[down_far];
costMinPoint = down_far;
for (int i = 1; i < 11; ++i) {
if (p.y < height - 3 - 2 * i) {
int pointTemp = down_far + 2 * i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
down_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[down_far], cost_array[3], params);
}
// left_far
if (p.x > 2) {
flag[5] = true;
num_valid_pixels++;
costMin = costs[left_far];
costMinPoint = left_far;
for (int i = 1; i < 11; ++i) {
if (p.x > 2 + 2 * i) {
int pointTemp = left_far - 2 * i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
left_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[left_far], cost_array[5], params);
}
// right_far
if (p.x < width - 3) {
flag[7] = true;
num_valid_pixels++;
costMin = costs[right_far];
costMinPoint = right_far;
for (int i = 1; i < 11; ++i) {
if (p.x < width - 3 - 2 * i) {
int pointTemp = right_far + 2 * i;
                if (costs[pointTemp] < costMin) { // keep the minimum-cost sample, consistent with the other directions
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
right_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[right_far], cost_array[7], params);
}
// up_near
if (p.y > 0) {
flag[0] = true;
num_valid_pixels++;
costMin = costs[up_near];
costMinPoint = up_near;
for (int i = 0; i < 3; ++i) {
if (p.y > 1 + i && p.x > i) {
int pointTemp = up_near - (1 + i) * width - i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.y > 1 + i && p.x < width - 1 - i) {
int pointTemp = up_near - (1 + i) * width + i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
up_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[up_near], cost_array[0], params);
}
// down_near
if (p.y < height - 1) {
flag[2] = true;
num_valid_pixels++;
costMin = costs[down_near];
costMinPoint = down_near;
for (int i = 0; i < 3; ++i) {
if (p.y < height - 2 - i && p.x > i) {
int pointTemp = down_near + (1 + i) * width - i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.y < height - 2 - i && p.x < width - 1 - i) {
int pointTemp = down_near + (1 + i) * width + i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
down_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[down_near], cost_array[2], params);
}
// left_near
if (p.x > 0) {
flag[4] = true;
num_valid_pixels++;
costMin = costs[left_near];
costMinPoint = left_near;
for (int i = 0; i < 3; ++i) {
if (p.x > 1 + i && p.y > i) {
int pointTemp = left_near - (1 + i) - i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.x > 1 + i && p.y < height - 1 - i) {
int pointTemp = left_near - (1 + i) + i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
left_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[left_near], cost_array[4], params);
}
// right_near
if (p.x < width - 1) {
flag[6] = true;
num_valid_pixels++;
costMin = costs[right_near];
costMinPoint = right_near;
for (int i = 0; i < 3; ++i) {
if (p.x < width - 2 - i && p.y > i) {
int pointTemp = right_near + (1 + i) - i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.x < width - 2 - i && p.y < height - 1- i) {
int pointTemp = right_near + (1 + i) + i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
right_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[right_near], cost_array[6], params);
}
const int positions[8] = {up_near, up_far, down_near, down_far, left_near, left_far, right_near, right_far};
// Multi-hypothesis Joint View Selection
float view_weights[32] = {0.0f};
float view_selection_priors[32] = {0.0f};
int neighbor_positions[4] = {center - width, center + width, center - 1, center + 1};
for (int i = 0; i < 4; ++i) {
if (flag[2 * i]) {
for (int j = 0; j < params.num_images - 1; ++j) {
if (isSet(selected_views[neighbor_positions[i]], j) == 1) {
view_selection_priors[j] += 0.9f;
} else {
view_selection_priors[j] += 0.1f;
}
}
}
}
float sampling_probs[32] = {0.0f};
float cost_threshold = 0.8 * expf((iter) * (iter) / (-90.0f));
for (int i = 0; i < params.num_images - 1; i++) {
float count = 0;
int count_false = 0;
float tmpw = 0;
for (int j = 0; j < 8; j++) {
if (cost_array[j][i] < cost_threshold) {
tmpw += expf(cost_array[j][i] * cost_array[j][i] / (-0.18f));
count++;
}
if (cost_array[j][i] > 1.2f) {
count_false++;
}
}
if (count > 2 && count_false < 3) {
sampling_probs[i] = tmpw / count;
}
else if (count_false < 3) {
sampling_probs[i] = expf(cost_threshold * cost_threshold / (-0.32f));
}
sampling_probs[i] = sampling_probs[i] * view_selection_priors[i];
}
TransformPDFToCDF(sampling_probs, params.num_images - 1);
for (int sample = 0; sample < 15; ++sample) {
const float rand_prob = hiprand_uniform(&rand_states[center]) - FLT_EPSILON;
for (int image_id = 0; image_id < params.num_images - 1; ++image_id) {
const float prob = sampling_probs[image_id];
if (prob > rand_prob) {
view_weights[image_id] += 1.0f;
break;
}
}
}
unsigned int temp_selected_views = 0;
int num_selected_view = 0;
float weight_norm = 0;
for (int i = 0; i < params.num_images - 1; ++i) {
if (view_weights[i] > 0) {
setBit(temp_selected_views, i);
weight_norm += view_weights[i];
num_selected_view++;
}
}
float final_costs[8] = {0.0f};
for (int i = 0; i < 8; ++i) {
for (int j = 0; j < params.num_images - 1; ++j) {
if (view_weights[j] > 0) {
if (params.geom_consistency) {
if (flag[i]) {
final_costs[i] += view_weights[j] * (cost_array[i][j] + 0.1f * ComputeGeomConsistencyCost(depths[j+1], cameras[0], cameras[j+1], plane_hypotheses[positions[i]], p));
}
else {
final_costs[i] += view_weights[j] * (cost_array[i][j] + 0.1f * 5.0f);
}
}
else {
final_costs[i] += view_weights[j] * cost_array[i][j];
}
}
}
final_costs[i] /= weight_norm;
}
const int min_cost_idx = FindMinCostIndex(final_costs, 8);
float cost_vector_now[32] = {2.0f};
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[center], cost_vector_now, params);
float cost_now = 0.0f;
for (int i = 0; i < params.num_images - 1; ++i) {
if (params.geom_consistency) {
cost_now += view_weights[i] * (cost_vector_now[i] + 0.1f * ComputeGeomConsistencyCost(depths[i+1], cameras[0], cameras[i+1], plane_hypotheses[center], p));
}
else {
cost_now += view_weights[i] * cost_vector_now[i];
}
}
cost_now /= weight_norm;
costs[center] = cost_now;
float depth_now = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[center], p);
float restricted_cost = 0.0f;
if (params.planar_prior) {
float restricted_final_costs[8] = {0.0f};
float gamma = 0.5f;
float depth_sigma = (params.depth_max - params.depth_min) / 64.0f;
float two_depth_sigma_squared = 2 * depth_sigma * depth_sigma;
float angle_sigma = M_PI * (5.0f / 180.0f);
float two_angle_sigma_squared = 2 * angle_sigma * angle_sigma;
float depth_prior = ComputeDepthfromPlaneHypothesis(cameras[0], prior_planes[center], p);
float beta = 0.18f;
if (plane_masks[center] > 0) {
for (int i = 0; i < 8; i++) {
if (flag[i]) {
float depth_now = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[i]], p);
float depth_diff = depth_now - depth_prior;
float angle_cos = Vec3DotVec3(prior_planes[center], plane_hypotheses[positions[i]]);
float angle_diff = acos(angle_cos);
float prior = gamma + exp(- depth_diff * depth_diff / two_depth_sigma_squared) * exp(- angle_diff * angle_diff / two_angle_sigma_squared);
restricted_final_costs[i] = exp(-final_costs[i] * final_costs[i] / beta) * prior;
}
}
const int max_cost_idx = FindMaxCostIndex(restricted_final_costs, 8);
float restricted_cost_now = 0.0f;
float depth_now = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[center], p);
float depth_diff = depth_now - depth_prior;
float angle_cos = Vec3DotVec3(prior_planes[center], plane_hypotheses[center]);
float angle_diff = acos(angle_cos);
float prior = gamma + exp(- depth_diff * depth_diff / two_depth_sigma_squared) * exp(- angle_diff * angle_diff / two_angle_sigma_squared);
restricted_cost_now = exp(-cost_now * cost_now / beta) * prior;
if (flag[max_cost_idx]) {
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[max_cost_idx]], p);
if (depth_before >= params.depth_min && depth_before <= params.depth_max && restricted_final_costs[max_cost_idx] > restricted_cost_now) {
depth_now = depth_before;
plane_hypotheses[center] = plane_hypotheses[positions[max_cost_idx]];
costs[center] = final_costs[max_cost_idx];
restricted_cost = restricted_final_costs[max_cost_idx];
selected_views[center] = temp_selected_views;
}
}
}
else if (flag[min_cost_idx]) {
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[min_cost_idx]], p);
if (depth_before >= params.depth_min && depth_before <= params.depth_max && final_costs[min_cost_idx] < cost_now) {
depth_now = depth_before;
plane_hypotheses[center] = plane_hypotheses[positions[min_cost_idx]];
costs[center] = final_costs[min_cost_idx];
}
}
}
if (!params.planar_prior && flag[min_cost_idx]) {
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[min_cost_idx]], p);
if (depth_before >= params.depth_min && depth_before <= params.depth_max && final_costs[min_cost_idx] < cost_now) {
depth_now = depth_before;
plane_hypotheses[center] = plane_hypotheses[positions[min_cost_idx]];
costs[center] = final_costs[min_cost_idx];
selected_views[center] = temp_selected_views;
}
}
PlaneHypothesisRefinement(images, depths, cameras, &plane_hypotheses[center], &depth_now, &costs[center], &rand_states[center], view_weights, weight_norm, prior_planes, plane_masks, &restricted_cost, p, params);
}
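// Checkerboard scheme: BlackPixelUpdate covers pixels whose x and y share parity and
// RedPixelUpdate the rest, so each half-iteration propagates from hypotheses the other
// color wrote in the previous pass.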
__global__ void BlackPixelUpdate(cudaTextureObjects *texture_objects, cudaTextureObjects *texture_depths, Camera *cameras, float4 *plane_hypotheses, float *costs, hiprandState_t *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const PatchMatchParams params, const int iter)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2;
} else {
p.y = p.y * 2 + 1;
}
CheckerboardPropagation(texture_objects[0].images, texture_depths[0].images, cameras, plane_hypotheses, costs, rand_states, selected_views, prior_planes, plane_masks, p, params, iter);
}
__global__ void RedPixelUpdate(cudaTextureObjects *texture_objects, cudaTextureObjects *texture_depths, Camera *cameras, float4 *plane_hypotheses, float *costs, hiprandState_t *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const PatchMatchParams params, const int iter)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2 + 1;
} else {
p.y = p.y * 2;
}
CheckerboardPropagation(texture_objects[0].images, texture_depths[0].images, cameras, plane_hypotheses, costs, rand_states, selected_views, prior_planes, plane_masks, p, params, iter);
}
__global__ void GetDepthandNormal(Camera *cameras, float4 *plane_hypotheses, const PatchMatchParams params)
{
const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const int width = cameras[0].width;
const int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
plane_hypotheses[center].w = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[center], p);
plane_hypotheses[center] = TransformNormal(cameras[0], plane_hypotheses[center]);
}
__device__ void CheckerboardFilter(const Camera *cameras, float4 *plane_hypotheses, float *costs, const int2 p)
{
int width = cameras[0].width;
int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
float filter[21];
int index = 0;
filter[index++] = plane_hypotheses[center].w;
// Left
const int left = center - 1;
const int leftleft = center - 3;
// Up
const int up = center - width;
const int upup = center - 3 * width;
// Down
const int down = center + width;
const int downdown = center + 3 * width;
// Right
const int right = center + 1;
const int rightright = center + 3;
if (costs[center] < 0.001f) {
return;
}
if (p.y>0) {
filter[index++] = plane_hypotheses[up].w;
}
if (p.y>2) {
filter[index++] = plane_hypotheses[upup].w;
}
if (p.y>4) {
filter[index++] = plane_hypotheses[upup-width*2].w;
}
if (p.y<height-1) {
filter[index++] = plane_hypotheses[down].w;
}
if (p.y<height-3) {
filter[index++] = plane_hypotheses[downdown].w;
}
if (p.y<height-5) {
filter[index++] = plane_hypotheses[downdown+width*2].w;
}
if (p.x>0) {
filter[index++] = plane_hypotheses[left].w;
}
if (p.x>2) {
filter[index++] = plane_hypotheses[leftleft].w;
}
if (p.x>4) {
filter[index++] = plane_hypotheses[leftleft-2].w;
}
if (p.x<width-1) {
filter[index++] = plane_hypotheses[right].w;
}
if (p.x<width-3) {
filter[index++] = plane_hypotheses[rightright].w;
}
if (p.x<width-5) {
filter[index++] = plane_hypotheses[rightright+2].w;
}
if (p.y>0 &&
p.x<width-2) {
filter[index++] = plane_hypotheses[up+2].w;
}
if (p.y< height-1 &&
p.x<width-2) {
filter[index++] = plane_hypotheses[down+2].w;
}
if (p.y>0 &&
p.x>1)
{
filter[index++] = plane_hypotheses[up-2].w;
}
if (p.y<height-1 &&
p.x>1) {
filter[index++] = plane_hypotheses[down-2].w;
}
if (p.x>0 &&
p.y>2)
{
filter[index++] = plane_hypotheses[left - width*2].w;
}
if (p.x<width-1 &&
p.y>2)
{
filter[index++] = plane_hypotheses[right - width*2].w;
}
if (p.x>0 &&
p.y<height-2) {
filter[index++] = plane_hypotheses[left + width*2].w;
}
if (p.x<width-1 &&
p.y<height-2) {
filter[index++] = plane_hypotheses[right + width*2].w;
}
sort_small(filter,index);
int median_index = index / 2;
if (index % 2 == 0) {
plane_hypotheses[center].w = (filter[median_index-1] + filter[median_index]) / 2;
} else {
plane_hypotheses[center].w = filter[median_index];
}
}
__global__ void BlackPixelFilter(const Camera *cameras, float4 *plane_hypotheses, float *costs)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2;
} else {
p.y = p.y * 2 + 1;
}
CheckerboardFilter(cameras, plane_hypotheses, costs, p);
}
__global__ void RedPixelFilter(const Camera *cameras, float4 *plane_hypotheses, float *costs)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2 + 1;
} else {
p.y = p.y * 2;
}
CheckerboardFilter(cameras, plane_hypotheses, costs, p);
}
void ACMP::RunPatchMatch()
{
const int width = cameras[0].width;
const int height = cameras[0].height;
// std::cout << width << " " << height << std::endl;
int BLOCK_W = 32;
int BLOCK_H = (BLOCK_W / 2);
dim3 grid_size_randinit;
grid_size_randinit.x = (width + 16 - 1) / 16;
grid_size_randinit.y=(height + 16 - 1) / 16;
grid_size_randinit.z = 1;
dim3 block_size_randinit;
block_size_randinit.x = 16;
block_size_randinit.y = 16;
block_size_randinit.z = 1;
dim3 grid_size_checkerboard;
grid_size_checkerboard.x = (width + BLOCK_W - 1) / BLOCK_W;
grid_size_checkerboard.y= ( (height / 2) + BLOCK_H - 1) / BLOCK_H;
grid_size_checkerboard.z = 1;
dim3 block_size_checkerboard;
block_size_checkerboard.x = BLOCK_W;
block_size_checkerboard.y = BLOCK_H;
block_size_checkerboard.z = 1;
int max_iterations = params.max_iterations;
hipLaunchKernelGGL(( RandomInitialization), dim3(grid_size_randinit), dim3(block_size_randinit), 0, 0, texture_objects_cuda, cameras_cuda, plane_hypotheses_cuda, costs_cuda, rand_states_cuda, selected_views_cuda, prior_planes_cuda, plane_masks_cuda, params);
CUDA_SAFE_CALL(hipDeviceSynchronize());
for (int i = 0; i < max_iterations; ++i) {
hipLaunchKernelGGL(( BlackPixelUpdate), dim3(grid_size_checkerboard), dim3(block_size_checkerboard), 0, 0, texture_objects_cuda, texture_depths_cuda, cameras_cuda, plane_hypotheses_cuda, costs_cuda, rand_states_cuda, selected_views_cuda, prior_planes_cuda, plane_masks_cuda, params, i);
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( RedPixelUpdate), dim3(grid_size_checkerboard), dim3(block_size_checkerboard), 0, 0, texture_objects_cuda, texture_depths_cuda, cameras_cuda, plane_hypotheses_cuda, costs_cuda, rand_states_cuda, selected_views_cuda, prior_planes_cuda, plane_masks_cuda, params, i);
CUDA_SAFE_CALL(hipDeviceSynchronize());
printf("iteration: %d\n", i);
}
hipLaunchKernelGGL(( GetDepthandNormal), dim3(grid_size_randinit), dim3(block_size_randinit), 0, 0, cameras_cuda, plane_hypotheses_cuda, params);
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( BlackPixelFilter), dim3(grid_size_checkerboard), dim3(block_size_checkerboard), 0, 0, cameras_cuda, plane_hypotheses_cuda, costs_cuda);
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( RedPixelFilter), dim3(grid_size_checkerboard), dim3(block_size_checkerboard), 0, 0, cameras_cuda, plane_hypotheses_cuda, costs_cuda);
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipMemcpy(plane_hypotheses_host, plane_hypotheses_cuda, sizeof(float4) * width * height, hipMemcpyDeviceToHost);
hipMemcpy(costs_host, costs_cuda, sizeof(float) * width * height, hipMemcpyDeviceToHost);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
|
d85b318230e070b19525f8b0a205c60cb753cdc8.cu
|
#include "ACMP.h"
__device__ void sort_small(float *d, const int n)
{
int j;
for (int i = 1; i < n; i++) {
float tmp = d[i];
for (j = i; j >= 1 && tmp < d[j-1]; j--)
d[j] = d[j-1];
d[j] = tmp;
}
}
__device__ void sort_small_weighted(float *d, float *w, int n)
{
int j;
for (int i = 1; i < n; i++) {
float tmp = d[i];
float tmp_w = w[i];
for (j = i; j >= 1 && tmp < d[j - 1]; j--) {
d[j] = d[j - 1];
w[j] = w[j - 1];
}
d[j] = tmp;
w[j] = tmp_w;
}
}
__device__ int FindMinCostIndex(const float *costs, const int n)
{
float min_cost = costs[0];
int min_cost_idx = 0;
for (int idx = 1; idx < n; ++idx) {
if (costs[idx] <= min_cost) {
min_cost = costs[idx];
min_cost_idx = idx;
}
}
return min_cost_idx;
}
__device__ int FindMaxCostIndex(const float *costs, const int n)
{
float max_cost = costs[0];
int max_cost_idx = 0;
for (int idx = 1; idx < n; ++idx) {
if (costs[idx] >= max_cost) {
max_cost = costs[idx];
max_cost_idx = idx;
}
}
return max_cost_idx;
}
__device__ void setBit(unsigned int &input, const unsigned int n)
{
input |= (unsigned int)(1 << n);
}
__device__ int isSet(unsigned int input, const unsigned int n)
{
return (input >> n) & 1;
}
__device__ void Mat33DotVec3(const float mat[9], const float4 vec, float4 *result)
{
result->x = mat[0] * vec.x + mat[1] * vec.y + mat[2] * vec.z;
result->y = mat[3] * vec.x + mat[4] * vec.y + mat[5] * vec.z;
result->z = mat[6] * vec.x + mat[7] * vec.y + mat[8] * vec.z;
}
__device__ float Vec3DotVec3(const float4 vec1, const float4 vec2)
{
return vec1.x * vec2.x + vec1.y * vec2.y + vec1.z * vec2.z;
}
__device__ void NormalizeVec3 (float4 *vec)
{
const float normSquared = vec->x * vec->x + vec->y * vec->y + vec->z * vec->z;
const float inverse_sqrt = rsqrtf (normSquared);
vec->x *= inverse_sqrt;
vec->y *= inverse_sqrt;
vec->z *= inverse_sqrt;
}
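// Normalizes the sampling probabilities in place and turns them into a cumulative
// distribution, used for Monte Carlo view sampling during propagation.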
__device__ void TransformPDFToCDF(float* probs, const int num_probs)
{
float prob_sum = 0.0f;
for (int i = 0; i < num_probs; ++i) {
prob_sum += probs[i];
}
const float inv_prob_sum = 1.0f / prob_sum;
float cum_prob = 0.0f;
for (int i = 0; i < num_probs; ++i) {
const float prob = probs[i] * inv_prob_sum;
cum_prob += prob;
probs[i] = cum_prob;
}
}
__device__ void Get3DPoint(const Camera camera, const int2 p, const float depth, float *X)
{
X[0] = depth * (p.x - camera.K[2]) / camera.K[0];
X[1] = depth * (p.y - camera.K[5]) / camera.K[4];
X[2] = depth;
}
__device__ float4 GetViewDirection(const Camera camera, const int2 p, const float depth)
{
float X[3];
Get3DPoint(camera, p, depth, X);
float norm = sqrt(X[0] * X[0] + X[1] * X[1] + X[2] * X[2]);
float4 view_direction;
view_direction.x = X[0] / norm;
view_direction.y = X[1] / norm;
view_direction.z = X[2] / norm;
view_direction.w = 0;
return view_direction;
}
__device__ float GetDistance2Origin(const Camera camera, const int2 p, const float depth, const float4 normal)
{
float X[3];
Get3DPoint(camera, p, depth, X);
return -(normal.x * X[0] + normal.y * X[1] + normal.z * X[2]);
}
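// Intersects the viewing ray through pixel p with the plane stored in plane_hypothesis
// (unit normal in xyz, signed distance to the origin in w) and returns the depth.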
__device__ float ComputeDepthfromPlaneHypothesis(const Camera camera, const float4 plane_hypothesis, const int2 p)
{
return -plane_hypothesis.w * camera.K[0] / ((p.x - camera.K[2]) * plane_hypothesis.x + (camera.K[0] / camera.K[4]) * (p.y - camera.K[5]) * plane_hypothesis.y + camera.K[0] * plane_hypothesis.z);
}
__device__ float4 GenerateRandomNormal(const Camera camera, const int2 p, curandState *rand_state, const float depth)
{
float4 normal;
float q1 = 1.0f;
float q2 = 1.0f;
float s = 2.0f;
while (s >= 1.0f) {
q1 = 2.0f * curand_uniform(rand_state) -1.0f;
q2 = 2.0f * curand_uniform(rand_state) - 1.0f;
s = q1 * q1 + q2 * q2;
}
const float sq = sqrt(1.0f - s);
normal.x = 2.0f * q1 * sq;
normal.y = 2.0f * q2 * sq;
normal.z = 1.0f - 2.0f * s;
normal.w = 0;
float4 view_direction = GetViewDirection(camera, p, depth);
float dot_product = normal.x * view_direction.x + normal.y * view_direction.y + normal.z * view_direction.z;
if (dot_product > 0.0f) {
normal.x = -normal.x;
normal.y = -normal.y;
normal.z = - normal.z;
}
NormalizeVec3(&normal);
return normal;
}
__device__ float4 GeneratePerturbedNormal(const Camera camera, const int2 p, const float4 normal, curandState *rand_state, const float perturbation)
{
float4 view_direction = GetViewDirection(camera, p, 1.0f);
const float a1 = (curand_uniform(rand_state) - 0.5f) * perturbation;
const float a2 = (curand_uniform(rand_state) - 0.5f) * perturbation;
const float a3 = (curand_uniform(rand_state) - 0.5f) * perturbation;
const float sin_a1 = sin(a1);
const float sin_a2 = sin(a2);
const float sin_a3 = sin(a3);
const float cos_a1 = cos(a1);
const float cos_a2 = cos(a2);
const float cos_a3 = cos(a3);
float R[9];
R[0] = cos_a2 * cos_a3;
R[1] = cos_a3 * sin_a1 * sin_a2 - cos_a1 * sin_a3;
R[2] = sin_a1 * sin_a3 + cos_a1 * cos_a3 * sin_a2;
R[3] = cos_a2 * sin_a3;
R[4] = cos_a1 * cos_a3 + sin_a1 * sin_a2 * sin_a3;
R[5] = cos_a1 * sin_a2 * sin_a3 - cos_a3 * sin_a1;
R[6] = -sin_a2;
R[7] = cos_a2 * sin_a1;
R[8] = cos_a1 * cos_a2;
float4 normal_perturbed;
Mat33DotVec3(R, normal, &normal_perturbed);
if (Vec3DotVec3(normal_perturbed, view_direction) >= 0.0f) {
normal_perturbed = normal;
}
NormalizeVec3(&normal_perturbed);
return normal_perturbed;
}
__device__ float4 GenerateRandomPlaneHypothesis(const Camera camera, const int2 p, curandState *rand_state, const float depth_min, const float depth_max)
{
float depth = curand_uniform(rand_state) * (depth_max - depth_min) + depth_min;
float4 plane_hypothesis = GenerateRandomNormal(camera, p, rand_state, depth);
plane_hypothesis.w = GetDistance2Origin(camera, p, depth, plane_hypothesis);
return plane_hypothesis;
}
__device__ float4 GeneratePertubedPlaneHypothesis(const Camera camera, const int2 p, curandState *rand_state, const float perturbation, const float4 plane_hypothesis_now, const float depth_now, const float depth_min, const float depth_max)
{
float depth_perturbed = depth_now;
float dist_perturbed = plane_hypothesis_now.w;
const float dist_min_perturbed = (1 - perturbation) * dist_perturbed;
const float dist_max_perturbed = (1 + perturbation) * dist_perturbed;
float4 plane_hypothesis_temp = plane_hypothesis_now;
do {
dist_perturbed = curand_uniform(rand_state) * (dist_max_perturbed - dist_min_perturbed) + dist_min_perturbed;
plane_hypothesis_temp.w = dist_perturbed;
depth_perturbed = ComputeDepthfromPlaneHypothesis(camera, plane_hypothesis_temp, p);
} while (depth_perturbed < depth_min && depth_perturbed > depth_max);
float4 plane_hypothesis = GeneratePerturbedNormal(camera, p, plane_hypothesis_now, rand_state, perturbation * M_PI);
plane_hypothesis.w = dist_perturbed;
return plane_hypothesis;
}
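// Plane-induced homography from the reference to the source view,
// H = K_src * (R_rel - t_rel * n^T / d) * K_ref^{-1}, for the plane (n, d).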
__device__ void ComputeHomography(const Camera ref_camera, const Camera src_camera, const float4 plane_hypothesis, float *H)
{
float ref_C[3];
float src_C[3];
ref_C[0] = -(ref_camera.R[0] * ref_camera.t[0] + ref_camera.R[3] * ref_camera.t[1] + ref_camera.R[6] * ref_camera.t[2]);
ref_C[1] = -(ref_camera.R[1] * ref_camera.t[0] + ref_camera.R[4] * ref_camera.t[1] + ref_camera.R[7] * ref_camera.t[2]);
ref_C[2] = -(ref_camera.R[2] * ref_camera.t[0] + ref_camera.R[5] * ref_camera.t[1] + ref_camera.R[8] * ref_camera.t[2]);
src_C[0] = -(src_camera.R[0] * src_camera.t[0] + src_camera.R[3] * src_camera.t[1] + src_camera.R[6] * src_camera.t[2]);
src_C[1] = -(src_camera.R[1] * src_camera.t[0] + src_camera.R[4] * src_camera.t[1] + src_camera.R[7] * src_camera.t[2]);
src_C[2] = -(src_camera.R[2] * src_camera.t[0] + src_camera.R[5] * src_camera.t[1] + src_camera.R[8] * src_camera.t[2]);
float R_relative[9];
float C_relative[3];
float t_relative[3];
R_relative[0] = src_camera.R[0] * ref_camera.R[0] + src_camera.R[1] * ref_camera.R[1] + src_camera.R[2] *ref_camera.R[2];
R_relative[1] = src_camera.R[0] * ref_camera.R[3] + src_camera.R[1] * ref_camera.R[4] + src_camera.R[2] *ref_camera.R[5];
R_relative[2] = src_camera.R[0] * ref_camera.R[6] + src_camera.R[1] * ref_camera.R[7] + src_camera.R[2] *ref_camera.R[8];
R_relative[3] = src_camera.R[3] * ref_camera.R[0] + src_camera.R[4] * ref_camera.R[1] + src_camera.R[5] *ref_camera.R[2];
R_relative[4] = src_camera.R[3] * ref_camera.R[3] + src_camera.R[4] * ref_camera.R[4] + src_camera.R[5] *ref_camera.R[5];
R_relative[5] = src_camera.R[3] * ref_camera.R[6] + src_camera.R[4] * ref_camera.R[7] + src_camera.R[5] *ref_camera.R[8];
R_relative[6] = src_camera.R[6] * ref_camera.R[0] + src_camera.R[7] * ref_camera.R[1] + src_camera.R[8] *ref_camera.R[2];
R_relative[7] = src_camera.R[6] * ref_camera.R[3] + src_camera.R[7] * ref_camera.R[4] + src_camera.R[8] *ref_camera.R[5];
R_relative[8] = src_camera.R[6] * ref_camera.R[6] + src_camera.R[7] * ref_camera.R[7] + src_camera.R[8] *ref_camera.R[8];
C_relative[0] = (ref_C[0] - src_C[0]);
C_relative[1] = (ref_C[1] - src_C[1]);
C_relative[2] = (ref_C[2] - src_C[2]);
t_relative[0] = src_camera.R[0] * C_relative[0] + src_camera.R[1] * C_relative[1] + src_camera.R[2] * C_relative[2];
t_relative[1] = src_camera.R[3] * C_relative[0] + src_camera.R[4] * C_relative[1] + src_camera.R[5] * C_relative[2];
t_relative[2] = src_camera.R[6] * C_relative[0] + src_camera.R[7] * C_relative[1] + src_camera.R[8] * C_relative[2];
H[0] = R_relative[0] - t_relative[0] * plane_hypothesis.x / plane_hypothesis.w;
H[1] = R_relative[1] - t_relative[0] * plane_hypothesis.y / plane_hypothesis.w;
H[2] = R_relative[2] - t_relative[0] * plane_hypothesis.z / plane_hypothesis.w;
H[3] = R_relative[3] - t_relative[1] * plane_hypothesis.x / plane_hypothesis.w;
H[4] = R_relative[4] - t_relative[1] * plane_hypothesis.y / plane_hypothesis.w;
H[5] = R_relative[5] - t_relative[1] * plane_hypothesis.z / plane_hypothesis.w;
H[6] = R_relative[6] - t_relative[2] * plane_hypothesis.x / plane_hypothesis.w;
H[7] = R_relative[7] - t_relative[2] * plane_hypothesis.y / plane_hypothesis.w;
H[8] = R_relative[8] - t_relative[2] * plane_hypothesis.z / plane_hypothesis.w;
float tmp[9];
tmp[0] = H[0] / ref_camera.K[0];
tmp[1] = H[1] / ref_camera.K[4];
tmp[2] = -H[0] * ref_camera.K[2] / ref_camera.K[0] - H[1] * ref_camera.K[5] / ref_camera.K[4] + H[2];
tmp[3] = H[3] / ref_camera.K[0];
tmp[4] = H[4] / ref_camera.K[4];
tmp[5] = -H[3] * ref_camera.K[2] / ref_camera.K[0] - H[4] * ref_camera.K[5] / ref_camera.K[4] + H[5];
tmp[6] = H[6] / ref_camera.K[0];
tmp[7] = H[7] / ref_camera.K[4];
tmp[8] = -H[6] * ref_camera.K[2] / ref_camera.K[0] - H[7] * ref_camera.K[5] / ref_camera.K[4] + H[8];
H[0] = src_camera.K[0] * tmp[0] + src_camera.K[2] * tmp[6];
H[1] = src_camera.K[0] * tmp[1] + src_camera.K[2] * tmp[7];
H[2] = src_camera.K[0] * tmp[2] + src_camera.K[2] * tmp[8];
H[3] = src_camera.K[4] * tmp[3] + src_camera.K[5] * tmp[6];
H[4] = src_camera.K[4] * tmp[4] + src_camera.K[5] * tmp[7];
H[5] = src_camera.K[4] * tmp[5] + src_camera.K[5] * tmp[8];
H[6] = src_camera.K[8] * tmp[6];
H[7] = src_camera.K[8] * tmp[7];
H[8] = src_camera.K[8] * tmp[8];
}
__device__ float2 ComputeCorrespondingPoint(const float *H, const int2 p)
{
float3 pt;
pt.x = H[0] * p.x + H[1] * p.y + H[2];
pt.y = H[3] * p.x + H[4] * p.y + H[5];
pt.z = H[6] * p.x + H[7] * p.y + H[8];
return make_float2(pt.x / pt.z, pt.y / pt.z);
}
__device__ float4 TransformNormal(const Camera camera, float4 plane_hypothesis)
{
float4 transformed_normal;
transformed_normal.x = camera.R[0] * plane_hypothesis.x + camera.R[3] * plane_hypothesis.y + camera.R[6] * plane_hypothesis.z;
transformed_normal.y = camera.R[1] * plane_hypothesis.x + camera.R[4] * plane_hypothesis.y + camera.R[7] * plane_hypothesis.z;
transformed_normal.z = camera.R[2] * plane_hypothesis.x + camera.R[5] * plane_hypothesis.y + camera.R[8] * plane_hypothesis.z;
transformed_normal.w = plane_hypothesis.w;
return transformed_normal;
}
__device__ float4 TransformNormal2RefCam(const Camera camera, float4 plane_hypothesis)
{
float4 transformed_normal;
transformed_normal.x = camera.R[0] * plane_hypothesis.x + camera.R[1] * plane_hypothesis.y + camera.R[2] * plane_hypothesis.z;
transformed_normal.y = camera.R[3] * plane_hypothesis.x + camera.R[4] * plane_hypothesis.y + camera.R[5] * plane_hypothesis.z;
transformed_normal.z = camera.R[6] * plane_hypothesis.x + camera.R[7] * plane_hypothesis.y + camera.R[8] * plane_hypothesis.z;
transformed_normal.w = plane_hypothesis.w;
return transformed_normal;
}
__device__ float ComputeBilateralWeight(const float x_dist, const float y_dist, const float pix, const float center_pix, const float sigma_spatial, const float sigma_color)
{
const float spatial_dist = sqrt(x_dist * x_dist + y_dist * y_dist);
const float color_dist = fabs(pix - center_pix);
return exp(-spatial_dist / (2.0f * sigma_spatial* sigma_spatial) - color_dist / (2.0f * sigma_color * sigma_color));
}
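// Bilaterally weighted NCC matching cost between the reference patch around p and the
// source patch warped by the plane-induced homography; returns a value in [0, 2], with
// cost_max for out-of-bounds or near-constant (low-variance) patches.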
__device__ float ComputeBilateralNCC(const cudaTextureObject_t ref_image, const Camera ref_camera, const cudaTextureObject_t src_image, const Camera src_camera, const int2 p, const float4 plane_hypothesis, const PatchMatchParams params)
{
const float cost_max = 2.0f;
int radius = params.patch_size / 2;
float H[9];
ComputeHomography(ref_camera, src_camera, plane_hypothesis, H);
float2 pt = ComputeCorrespondingPoint(H, p);
if (pt.x >= src_camera.width || pt.x < 0.0f || pt.y >= src_camera.height || pt.y < 0.0f) {
return cost_max;
}
float cost = 0.0f;
{
float sum_ref = 0.0f;
float sum_ref_ref = 0.0f;
float sum_src = 0.0f;
float sum_src_src = 0.0f;
float sum_ref_src = 0.0f;
float bilateral_weight_sum = 0.0f;
const float ref_center_pix = tex2D<float>(ref_image, p.x + 0.5f, p.y + 0.5f);
for (int i = -radius; i < radius + 1; i += params.radius_increment) {
float sum_ref_row = 0.0f;
float sum_src_row = 0.0f;
float sum_ref_ref_row = 0.0f;
float sum_src_src_row = 0.0f;
float sum_ref_src_row = 0.0f;
float bilateral_weight_sum_row = 0.0f;
for (int j = -radius; j < radius + 1; j += params.radius_increment) {
const int2 ref_pt = make_int2(p.x + i, p.y + j);
const float ref_pix = tex2D<float>(ref_image, ref_pt.x + 0.5f, ref_pt.y + 0.5f);
float2 src_pt = ComputeCorrespondingPoint(H, ref_pt);
const float src_pix = tex2D<float>(src_image, src_pt.x + 0.5f, src_pt.y + 0.5f);
float weight = ComputeBilateralWeight(i, j, ref_pix, ref_center_pix, params.sigma_spatial, params.sigma_color);
sum_ref_row += weight * ref_pix;
sum_ref_ref_row += weight * ref_pix * ref_pix;
sum_src_row += weight * src_pix;
sum_src_src_row += weight * src_pix * src_pix;
sum_ref_src_row += weight * ref_pix * src_pix;
bilateral_weight_sum_row += weight;
}
sum_ref += sum_ref_row;
sum_ref_ref += sum_ref_ref_row;
sum_src += sum_src_row;
sum_src_src += sum_src_src_row;
sum_ref_src += sum_ref_src_row;
bilateral_weight_sum += bilateral_weight_sum_row;
}
const float inv_bilateral_weight_sum = 1.0f / bilateral_weight_sum;
sum_ref *= inv_bilateral_weight_sum;
sum_ref_ref *= inv_bilateral_weight_sum;
sum_src *= inv_bilateral_weight_sum;
sum_src_src *= inv_bilateral_weight_sum;
sum_ref_src *= inv_bilateral_weight_sum;
const float var_ref = sum_ref_ref - sum_ref * sum_ref;
const float var_src = sum_src_src - sum_src * sum_src;
const float kMinVar = 1e-5f;
if (var_ref < kMinVar || var_src < kMinVar) {
return cost = cost_max;
} else {
const float covar_src_ref = sum_ref_src - sum_ref * sum_src;
const float var_ref_src = sqrt(var_ref * var_src);
return cost = max(0.0f, min(cost_max, 1.0f - covar_src_ref / var_ref_src));
}
}
}
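// Evaluates the per-view NCC cost for the given hypothesis, marks the views whose cost
// does not exceed the k-th smallest one in the selected_views bitmask, and returns the
// mean of the top-k costs (or cost_max if no view is valid).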
__device__ float ComputeMultiViewInitialCostandSelectedViews(const cudaTextureObject_t *images, const Camera *cameras, const int2 p, const float4 plane_hypothesis, unsigned int *selected_views, const PatchMatchParams params)
{
float cost_max = 2.0f;
float cost_vector[32] = {2.0f};
float cost_vector_copy[32] = {2.0f};
int cost_count = 0;
int num_valid_views = 0;
for (int i = 1; i < params.num_images; ++i) {
float c = ComputeBilateralNCC(images[0], cameras[0], images[i], cameras[i], p, plane_hypothesis, params);
cost_vector[i - 1] = c;
cost_vector_copy[i - 1] = c;
cost_count++;
if (c < cost_max) {
num_valid_views++;
}
}
sort_small(cost_vector, cost_count);
*selected_views = 0;
int top_k = min(num_valid_views, params.top_k);
if (top_k > 0) {
float cost = 0.0f;
for (int i = 0; i < top_k; ++i) {
cost += cost_vector[i];
}
float cost_threshold = cost_vector[top_k - 1];
for (int i = 0; i < params.num_images - 1; ++i) {
if (cost_vector_copy[i] <= cost_threshold) {
setBit(*selected_views, i);
}
}
return cost / top_k;
} else {
return cost_max;
}
}
__device__ void ComputeMultiViewCostVector(const cudaTextureObject_t *images, const Camera *cameras, const int2 p, const float4 plane_hypothesis, float *cost_vector, const PatchMatchParams params)
{
for (int i = 1; i < params.num_images; ++i) {
cost_vector[i - 1] = ComputeBilateralNCC(images[0], cameras[0], images[i], cameras[i], p, plane_hypothesis, params);
}
}
__device__ float3 Get3DPointonWorld_cu(const float x, const float y, const float depth, const Camera camera)
{
float3 pointX;
float3 tmpX;
// Reprojection
pointX.x = depth * (x - camera.K[2]) / camera.K[0];
pointX.y = depth * (y - camera.K[5]) / camera.K[4];
pointX.z = depth;
// Rotation
tmpX.x = camera.R[0] * pointX.x + camera.R[3] * pointX.y + camera.R[6] * pointX.z;
tmpX.y = camera.R[1] * pointX.x + camera.R[4] * pointX.y + camera.R[7] * pointX.z;
tmpX.z = camera.R[2] * pointX.x + camera.R[5] * pointX.y + camera.R[8] * pointX.z;
// Transformation
float3 C;
C.x = -(camera.R[0] * camera.t[0] + camera.R[3] * camera.t[1] + camera.R[6] * camera.t[2]);
C.y = -(camera.R[1] * camera.t[0] + camera.R[4] * camera.t[1] + camera.R[7] * camera.t[2]);
C.z = -(camera.R[2] * camera.t[0] + camera.R[5] * camera.t[1] + camera.R[8] * camera.t[2]);
pointX.x = tmpX.x + C.x;
pointX.y = tmpX.y + C.y;
pointX.z = tmpX.z + C.z;
return pointX;
}
__device__ void ProjectonCamera_cu(const float3 PointX, const Camera camera, float2 &point, float &depth)
{
float3 tmp;
tmp.x = camera.R[0] * PointX.x + camera.R[1] * PointX.y + camera.R[2] * PointX.z + camera.t[0];
tmp.y = camera.R[3] * PointX.x + camera.R[4] * PointX.y + camera.R[5] * PointX.z + camera.t[1];
tmp.z = camera.R[6] * PointX.x + camera.R[7] * PointX.y + camera.R[8] * PointX.z + camera.t[2];
depth = camera.K[6] * tmp.x + camera.K[7] * tmp.y + camera.K[8] * tmp.z;
point.x = (camera.K[0] * tmp.x + camera.K[1] * tmp.y + camera.K[2] * tmp.z) / depth;
point.y = (camera.K[3] * tmp.x + camera.K[4] * tmp.y + camera.K[5] * tmp.z) / depth;
}
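// Forward-backward reprojection error: project the reference depth into the source view,
// read the source depth there, project that 3D point back, and return the pixel distance
// to p, clamped to max_cost.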
__device__ float ComputeGeomConsistencyCost(const cudaTextureObject_t depth_image, const Camera ref_camera, const Camera src_camera, const float4 plane_hypothesis, const int2 p)
{
const float max_cost = 5.0f;
float depth = ComputeDepthfromPlaneHypothesis(ref_camera, plane_hypothesis, p);
float3 forward_point = Get3DPointonWorld_cu(p.x, p.y, depth, ref_camera);
float2 src_pt;
float src_d;
ProjectonCamera_cu(forward_point, src_camera, src_pt, src_d);
const float src_depth = tex2D<float>(depth_image, (int)src_pt.x + 0.5f, (int)src_pt.y + 0.5f);
if (src_depth == 0.0f) {
return max_cost;
}
float3 src_3D_pt = Get3DPointonWorld_cu(src_pt.x, src_pt.y, src_depth, src_camera);
float2 backward_point;
float ref_d;
ProjectonCamera_cu(src_3D_pt, ref_camera, backward_point, ref_d);
const float diff_col = p.x - backward_point.x;
const float diff_row = p.y - backward_point.y;
return min(max_cost, sqrt(diff_col * diff_col + diff_row * diff_row));
}
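// Per-pixel initialization. With geom_consistency the stored depth/normal is converted
// back into a plane hypothesis; with planar_prior, masked high-cost pixels perturb the
// prior plane while other pixels keep their current hypothesis; otherwise a random plane
// is drawn. The initial multi-view cost and view selection are computed in every case.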
__global__ void RandomInitialization(cudaTextureObjects *texture_objects, Camera *cameras, float4 *plane_hypotheses, float *costs, curandState *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const PatchMatchParams params)
{
const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
int width = cameras[0].width;
int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
curand_init(clock64(), p.y, p.x, &rand_states[center]);
if (params.geom_consistency) {
float4 plane_hypothesis = plane_hypotheses[center];
plane_hypothesis = TransformNormal2RefCam(cameras[0], plane_hypothesis);
float depth = plane_hypothesis.w;
plane_hypothesis.w = GetDistance2Origin(cameras[0], p, depth, plane_hypothesis);
plane_hypotheses[center] = plane_hypothesis;
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
else if (params.planar_prior) {
if (plane_masks[center] > 0 && costs[center] >= 0.1f) {
float perturbation = 0.02f;
float4 plane_hypothesis = prior_planes[center];
float depth_perturbed = plane_hypothesis.w;
const float depth_min_perturbed = (1 - 3 * perturbation) * depth_perturbed;
const float depth_max_perturbed = (1 + 3 * perturbation) * depth_perturbed;
depth_perturbed = curand_uniform(&rand_states[center]) * (depth_max_perturbed - depth_min_perturbed) + depth_min_perturbed;
float4 plane_hypothesis_perturbed = GeneratePerturbedNormal(cameras[0], p, plane_hypothesis, &rand_states[center], 3 * perturbation * M_PI);
plane_hypothesis_perturbed.w = depth_perturbed;
plane_hypotheses[center] = plane_hypothesis_perturbed;
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
else {
float4 plane_hypothesis = plane_hypotheses[center];
float depth = plane_hypothesis.w;
plane_hypothesis.w = GetDistance2Origin(cameras[0], p, depth, plane_hypothesis);
plane_hypotheses[center] = plane_hypothesis;
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
}
else {
plane_hypotheses[center] = GenerateRandomPlaneHypothesis(cameras[0], p, &rand_states[center], params.depth_min, params.depth_max);
costs[center] = ComputeMultiViewInitialCostandSelectedViews(texture_objects[0].images, cameras, p, plane_hypotheses[center], &selected_views[center], params);
}
}
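// Refinement: evaluates combinations of the current, a randomly drawn, and a perturbed
// depth/normal pair with the view-weighted multi-view cost (plus geometric consistency
// or planar-prior terms when enabled) and keeps the best hypothesis in place.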
__device__ void PlaneHypothesisRefinement(const cudaTextureObject_t *images, const cudaTextureObject_t *depth_images, const Camera *cameras, float4 *plane_hypothesis, float *depth, float *cost, curandState *rand_state, const float *view_weights, const float weight_norm, float4 *prior_planes, unsigned int *plane_masks, float *restricted_cost, const int2 p, const PatchMatchParams params)
{
float perturbation = 0.02f;
const int center = p.y * cameras[0].width + p.x;
float gamma = 0.5f;
float depth_sigma = (params.depth_max - params.depth_min) / 64.0f;
float two_depth_sigma_squared = 2 * depth_sigma * depth_sigma;
float angle_sigma = M_PI * (5.0f / 180.0f);
float two_angle_sigma_squared = 2 * angle_sigma * angle_sigma;
float beta = 0.18f;
float depth_prior = 0.0f;
float depth_rand;
float4 plane_hypothesis_rand;
if (params.planar_prior && plane_masks[center] > 0) {
depth_prior = ComputeDepthfromPlaneHypothesis(cameras[0], prior_planes[center], p);
depth_rand = curand_uniform(rand_state) * 6 * depth_sigma + (depth_prior - 3 * depth_sigma);
plane_hypothesis_rand = GeneratePerturbedNormal(cameras[0], p, prior_planes[center], rand_state, angle_sigma);
}
else {
depth_rand = curand_uniform(rand_state) * (params.depth_max - params.depth_min) + params.depth_min;
plane_hypothesis_rand = GenerateRandomNormal(cameras[0], p, rand_state, *depth);
}
float depth_perturbed = *depth;
const float depth_min_perturbed = (1 - perturbation) * depth_perturbed;
const float depth_max_perturbed = (1 + perturbation) * depth_perturbed;
do {
depth_perturbed = curand_uniform(rand_state) * (depth_max_perturbed - depth_min_perturbed) + depth_min_perturbed;
} while (depth_perturbed < params.depth_min && depth_perturbed > params.depth_max);
float4 plane_hypothesis_perturbed = GeneratePerturbedNormal(cameras[0], p, *plane_hypothesis, rand_state, perturbation * M_PI); // GeneratePertubedPlaneHypothesis(cameras[0], p, rand_state, perturbation, *plane_hypothesis, *depth, params.depth_min, params.depth_max);
const int num_planes = 5;
float depths[num_planes] = {depth_rand, *depth, depth_rand, *depth, depth_perturbed};
float4 normals[num_planes] = {*plane_hypothesis, plane_hypothesis_rand, plane_hypothesis_rand, plane_hypothesis_perturbed, *plane_hypothesis};
for (int i = 0; i < num_planes; ++i) {
float cost_vector[32] = {2.0f};
float4 temp_plane_hypothesis = normals[i];
temp_plane_hypothesis.w = GetDistance2Origin(cameras[0], p, depths[i], temp_plane_hypothesis); // dists[i];
ComputeMultiViewCostVector(images, cameras, p, temp_plane_hypothesis, cost_vector, params);
float temp_cost = 0.0f;
for (int j = 0; j < params.num_images - 1; ++j) {
if (view_weights[j] > 0) {
if (params.geom_consistency) {
temp_cost += view_weights[j] * (cost_vector[j] + 0.1f * ComputeGeomConsistencyCost(depth_images[j+1], cameras[0], cameras[j+1], temp_plane_hypothesis, p));
}
else {
temp_cost += view_weights[j] * cost_vector[j];
}
}
}
temp_cost /= weight_norm;
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], temp_plane_hypothesis, p);
if (params.planar_prior && plane_masks[center] > 0) {
float depth_diff = depths[i] - depth_prior;
float angle_cos = Vec3DotVec3(prior_planes[center], temp_plane_hypothesis);
float angle_diff = acos(angle_cos);
float prior = gamma + exp(- depth_diff * depth_diff / two_depth_sigma_squared) * exp(- angle_diff * angle_diff / two_angle_sigma_squared);
float restricted_temp_cost = exp(-temp_cost * temp_cost / beta) * prior;
if (depth_before >= params.depth_min && depth_before <= params.depth_max && restricted_temp_cost > *restricted_cost) {
*depth = depth_before;
*plane_hypothesis = temp_plane_hypothesis;
*cost = temp_cost;
*restricted_cost = restricted_temp_cost;
}
}
else {
if (depth_before >= params.depth_min && depth_before <= params.depth_max && temp_cost < *cost) {
*depth = depth_before;
*plane_hypothesis = temp_plane_hypothesis;
*cost = temp_cost;
}
}
}
}
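// Core propagation step: adaptive checkerboard sampling picks the lowest-cost neighbor
// hypothesis along each of the eight near/far directions, multi-hypothesis joint view
// selection weights the source views by how well they agree with those hypotheses, and
// the best-scoring hypothesis (optionally restricted by the planar prior) replaces the
// current one before refinement.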
__device__ void CheckerboardPropagation(const cudaTextureObject_t *images, const cudaTextureObject_t *depths, const Camera *cameras, float4 *plane_hypotheses, float *costs, curandState *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const int2 p, const PatchMatchParams params, const int iter)
{
int width = cameras[0].width;
int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
int left_near = center - 1;
int left_far = center - 3;
int right_near = center + 1;
int right_far = center + 3;
int up_near = center - width;
int up_far = center - 3 * width;
int down_near = center + width;
int down_far = center + 3 * width;
// Adaptive Checkerboard Sampling
float cost_array[8][32] = {2.0f};
// 0 -- up_near, 1 -- up_far, 2 -- down_near, 3 -- down_far, 4 -- left_near, 5 -- left_far, 6 -- right_near, 7 -- right_far
bool flag[8] = {false};
int num_valid_pixels = 0;
float costMin;
int costMinPoint;
// up_far
if (p.y > 2) {
flag[1] = true;
num_valid_pixels++;
costMin = costs[up_far];
costMinPoint = up_far;
for (int i = 1; i < 11; ++i) {
if (p.y > 2 + 2 * i) {
int pointTemp = up_far - 2 * i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
up_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[up_far], cost_array[1], params);
}
// down_far
if (p.y < height - 3) {
flag[3] = true;
num_valid_pixels++;
costMin = costs[down_far];
costMinPoint = down_far;
for (int i = 1; i < 11; ++i) {
if (p.y < height - 3 - 2 * i) {
int pointTemp = down_far + 2 * i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
down_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[down_far], cost_array[3], params);
}
// left_far
if (p.x > 2) {
flag[5] = true;
num_valid_pixels++;
costMin = costs[left_far];
costMinPoint = left_far;
for (int i = 1; i < 11; ++i) {
if (p.x > 2 + 2 * i) {
int pointTemp = left_far - 2 * i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
left_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[left_far], cost_array[5], params);
}
// right_far
if (p.x < width - 3) {
flag[7] = true;
num_valid_pixels++;
costMin = costs[right_far];
costMinPoint = right_far;
for (int i = 1; i < 11; ++i) {
if (p.x < width - 3 - 2 * i) {
int pointTemp = right_far + 2 * i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
right_far = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[right_far], cost_array[7], params);
}
// up_near
if (p.y > 0) {
flag[0] = true;
num_valid_pixels++;
costMin = costs[up_near];
costMinPoint = up_near;
for (int i = 0; i < 3; ++i) {
if (p.y > 1 + i && p.x > i) {
int pointTemp = up_near - (1 + i) * width - i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.y > 1 + i && p.x < width - 1 - i) {
int pointTemp = up_near - (1 + i) * width + i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
up_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[up_near], cost_array[0], params);
}
// down_near
if (p.y < height - 1) {
flag[2] = true;
num_valid_pixels++;
costMin = costs[down_near];
costMinPoint = down_near;
for (int i = 0; i < 3; ++i) {
if (p.y < height - 2 - i && p.x > i) {
int pointTemp = down_near + (1 + i) * width - i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.y < height - 2 - i && p.x < width - 1 - i) {
int pointTemp = down_near + (1 + i) * width + i;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
down_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[down_near], cost_array[2], params);
}
// left_near
if (p.x > 0) {
flag[4] = true;
num_valid_pixels++;
costMin = costs[left_near];
costMinPoint = left_near;
for (int i = 0; i < 3; ++i) {
if (p.x > 1 + i && p.y > i) {
int pointTemp = left_near - (1 + i) - i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.x > 1 + i && p.y < height - 1 - i) {
int pointTemp = left_near - (1 + i) + i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
left_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[left_near], cost_array[4], params);
}
// right_near
if (p.x < width - 1) {
flag[6] = true;
num_valid_pixels++;
costMin = costs[right_near];
costMinPoint = right_near;
for (int i = 0; i < 3; ++i) {
if (p.x < width - 2 - i && p.y > i) {
int pointTemp = right_near + (1 + i) - i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
if (p.x < width - 2 - i && p.y < height - 1 - i) {
int pointTemp = right_near + (1 + i) + i * width;
if (costs[pointTemp] < costMin) {
costMin = costs[pointTemp];
costMinPoint = pointTemp;
}
}
}
right_near = costMinPoint;
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[right_near], cost_array[6], params);
}
const int positions[8] = {up_near, up_far, down_near, down_far, left_near, left_far, right_near, right_far};
// Multi-hypothesis Joint View Selection
float view_weights[32] = {0.0f};
float view_selection_priors[32] = {0.0f};
int neighbor_positions[4] = {center - width, center + width, center - 1, center + 1};
for (int i = 0; i < 4; ++i) {
if (flag[2 * i]) {
for (int j = 0; j < params.num_images - 1; ++j) {
if (isSet(selected_views[neighbor_positions[i]], j) == 1) {
view_selection_priors[j] += 0.9f;
} else {
view_selection_priors[j] += 0.1f;
}
}
}
}
float sampling_probs[32] = {0.0f};
float cost_threshold = 0.8 * expf((iter) * (iter) / (-90.0f));
for (int i = 0; i < params.num_images - 1; i++) {
float count = 0;
int count_false = 0;
float tmpw = 0;
for (int j = 0; j < 8; j++) {
if (cost_array[j][i] < cost_threshold) {
tmpw += expf(cost_array[j][i] * cost_array[j][i] / (-0.18f));
count++;
}
if (cost_array[j][i] > 1.2f) {
count_false++;
}
}
if (count > 2 && count_false < 3) {
sampling_probs[i] = tmpw / count;
}
else if (count_false < 3) {
sampling_probs[i] = expf(cost_threshold * cost_threshold / (-0.32f));
}
sampling_probs[i] = sampling_probs[i] * view_selection_priors[i];
}
TransformPDFToCDF(sampling_probs, params.num_images - 1);
for (int sample = 0; sample < 15; ++sample) {
const float rand_prob = curand_uniform(&rand_states[center]) - FLT_EPSILON;
for (int image_id = 0; image_id < params.num_images - 1; ++image_id) {
const float prob = sampling_probs[image_id];
if (prob > rand_prob) {
view_weights[image_id] += 1.0f;
break;
}
}
}
unsigned int temp_selected_views = 0;
int num_selected_view = 0;
float weight_norm = 0;
for (int i = 0; i < params.num_images - 1; ++i) {
if (view_weights[i] > 0) {
setBit(temp_selected_views, i);
weight_norm += view_weights[i];
num_selected_view++;
}
}
float final_costs[8] = {0.0f};
for (int i = 0; i < 8; ++i) {
for (int j = 0; j < params.num_images - 1; ++j) {
if (view_weights[j] > 0) {
if (params.geom_consistency) {
if (flag[i]) {
final_costs[i] += view_weights[j] * (cost_array[i][j] + 0.1f * ComputeGeomConsistencyCost(depths[j+1], cameras[0], cameras[j+1], plane_hypotheses[positions[i]], p));
}
else {
final_costs[i] += view_weights[j] * (cost_array[i][j] + 0.1f * 5.0f);
}
}
else {
final_costs[i] += view_weights[j] * cost_array[i][j];
}
}
}
final_costs[i] /= weight_norm;
}
const int min_cost_idx = FindMinCostIndex(final_costs, 8);
float cost_vector_now[32] = {2.0f};
ComputeMultiViewCostVector(images, cameras, p, plane_hypotheses[center], cost_vector_now, params);
float cost_now = 0.0f;
for (int i = 0; i < params.num_images - 1; ++i) {
if (params.geom_consistency) {
cost_now += view_weights[i] * (cost_vector_now[i] + 0.1f * ComputeGeomConsistencyCost(depths[i+1], cameras[0], cameras[i+1], plane_hypotheses[center], p));
}
else {
cost_now += view_weights[i] * cost_vector_now[i];
}
}
cost_now /= weight_norm;
costs[center] = cost_now;
float depth_now = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[center], p);
float restricted_cost = 0.0f;
if (params.planar_prior) {
float restricted_final_costs[8] = {0.0f};
float gamma = 0.5f;
float depth_sigma = (params.depth_max - params.depth_min) / 64.0f;
float two_depth_sigma_squared = 2 * depth_sigma * depth_sigma;
float angle_sigma = M_PI * (5.0f / 180.0f);
float two_angle_sigma_squared = 2 * angle_sigma * angle_sigma;
float depth_prior = ComputeDepthfromPlaneHypothesis(cameras[0], prior_planes[center], p);
float beta = 0.18f;
if (plane_masks[center] > 0) {
for (int i = 0; i < 8; i++) {
if (flag[i]) {
float depth_now = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[i]], p);
float depth_diff = depth_now - depth_prior;
float angle_cos = Vec3DotVec3(prior_planes[center], plane_hypotheses[positions[i]]);
float angle_diff = acos(angle_cos);
float prior = gamma + exp(- depth_diff * depth_diff / two_depth_sigma_squared) * exp(- angle_diff * angle_diff / two_angle_sigma_squared);
restricted_final_costs[i] = exp(-final_costs[i] * final_costs[i] / beta) * prior;
}
}
const int max_cost_idx = FindMaxCostIndex(restricted_final_costs, 8);
float restricted_cost_now = 0.0f;
float depth_now = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[center], p);
float depth_diff = depth_now - depth_prior;
float angle_cos = Vec3DotVec3(prior_planes[center], plane_hypotheses[center]);
float angle_diff = acos(angle_cos);
float prior = gamma + exp(- depth_diff * depth_diff / two_depth_sigma_squared) * exp(- angle_diff * angle_diff / two_angle_sigma_squared);
restricted_cost_now = exp(-cost_now * cost_now / beta) * prior;
if (flag[max_cost_idx]) {
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[max_cost_idx]], p);
if (depth_before >= params.depth_min && depth_before <= params.depth_max && restricted_final_costs[max_cost_idx] > restricted_cost_now) {
depth_now = depth_before;
plane_hypotheses[center] = plane_hypotheses[positions[max_cost_idx]];
costs[center] = final_costs[max_cost_idx];
restricted_cost = restricted_final_costs[max_cost_idx];
selected_views[center] = temp_selected_views;
}
}
}
else if (flag[min_cost_idx]) {
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[min_cost_idx]], p);
if (depth_before >= params.depth_min && depth_before <= params.depth_max && final_costs[min_cost_idx] < cost_now) {
depth_now = depth_before;
plane_hypotheses[center] = plane_hypotheses[positions[min_cost_idx]];
costs[center] = final_costs[min_cost_idx];
}
}
}
if (!params.planar_prior && flag[min_cost_idx]) {
float depth_before = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[positions[min_cost_idx]], p);
if (depth_before >= params.depth_min && depth_before <= params.depth_max && final_costs[min_cost_idx] < cost_now) {
depth_now = depth_before;
plane_hypotheses[center] = plane_hypotheses[positions[min_cost_idx]];
costs[center] = final_costs[min_cost_idx];
selected_views[center] = temp_selected_views;
}
}
PlaneHypothesisRefinement(images, depths, cameras, &plane_hypotheses[center], &depth_now, &costs[center], &rand_states[center], view_weights, weight_norm, prior_planes, plane_masks, &restricted_cost, p, params);
}
__global__ void BlackPixelUpdate(cudaTextureObjects *texture_objects, cudaTextureObjects *texture_depths, Camera *cameras, float4 *plane_hypotheses, float *costs, curandState *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const PatchMatchParams params, const int iter)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2;
} else {
p.y = p.y * 2 + 1;
}
CheckerboardPropagation(texture_objects[0].images, texture_depths[0].images, cameras, plane_hypotheses, costs, rand_states, selected_views, prior_planes, plane_masks, p, params, iter);
}
__global__ void RedPixelUpdate(cudaTextureObjects *texture_objects, cudaTextureObjects *texture_depths, Camera *cameras, float4 *plane_hypotheses, float *costs, curandState *rand_states, unsigned int *selected_views, float4 *prior_planes, unsigned int *plane_masks, const PatchMatchParams params, const int iter)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2 + 1;
} else {
p.y = p.y * 2;
}
CheckerboardPropagation(texture_objects[0].images, texture_depths[0].images, cameras, plane_hypotheses, costs, rand_states, selected_views, prior_planes, plane_masks, p, params, iter);
}
__global__ void GetDepthandNormal(Camera *cameras, float4 *plane_hypotheses, const PatchMatchParams params)
{
const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const int width = cameras[0].width;
const int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
plane_hypotheses[center].w = ComputeDepthfromPlaneHypothesis(cameras[0], plane_hypotheses[center], p);
plane_hypotheses[center] = TransformNormal(cameras[0], plane_hypotheses[center]);
}
__device__ void CheckerboardFilter(const Camera *cameras, float4 *plane_hypotheses, float *costs, const int2 p)
{
int width = cameras[0].width;
int height = cameras[0].height;
if (p.x >= width || p.y >= height) {
return;
}
const int center = p.y * width + p.x;
float filter[21];
int index = 0;
filter[index++] = plane_hypotheses[center].w;
// Left
const int left = center - 1;
const int leftleft = center - 3;
// Up
const int up = center - width;
const int upup = center - 3 * width;
// Down
const int down = center + width;
const int downdown = center + 3 * width;
// Right
const int right = center + 1;
const int rightright = center + 3;
if (costs[center] < 0.001f) {
return;
}
if (p.y>0) {
filter[index++] = plane_hypotheses[up].w;
}
if (p.y>2) {
filter[index++] = plane_hypotheses[upup].w;
}
if (p.y>4) {
filter[index++] = plane_hypotheses[upup-width*2].w;
}
if (p.y<height-1) {
filter[index++] = plane_hypotheses[down].w;
}
if (p.y<height-3) {
filter[index++] = plane_hypotheses[downdown].w;
}
if (p.y<height-5) {
filter[index++] = plane_hypotheses[downdown+width*2].w;
}
if (p.x>0) {
filter[index++] = plane_hypotheses[left].w;
}
if (p.x>2) {
filter[index++] = plane_hypotheses[leftleft].w;
}
if (p.x>4) {
filter[index++] = plane_hypotheses[leftleft-2].w;
}
if (p.x<width-1) {
filter[index++] = plane_hypotheses[right].w;
}
if (p.x<width-3) {
filter[index++] = plane_hypotheses[rightright].w;
}
if (p.x<width-5) {
filter[index++] = plane_hypotheses[rightright+2].w;
}
if (p.y>0 &&
p.x<width-2) {
filter[index++] = plane_hypotheses[up+2].w;
}
if (p.y< height-1 &&
p.x<width-2) {
filter[index++] = plane_hypotheses[down+2].w;
}
if (p.y>0 &&
p.x>1)
{
filter[index++] = plane_hypotheses[up-2].w;
}
if (p.y<height-1 &&
p.x>1) {
filter[index++] = plane_hypotheses[down-2].w;
}
if (p.x>0 &&
p.y>2)
{
filter[index++] = plane_hypotheses[left - width*2].w;
}
if (p.x<width-1 &&
p.y>2)
{
filter[index++] = plane_hypotheses[right - width*2].w;
}
if (p.x>0 &&
p.y<height-2) {
filter[index++] = plane_hypotheses[left + width*2].w;
}
if (p.x<width-1 &&
p.y<height-2) {
filter[index++] = plane_hypotheses[right + width*2].w;
}
sort_small(filter,index);
int median_index = index / 2;
if (index % 2 == 0) {
plane_hypotheses[center].w = (filter[median_index-1] + filter[median_index]) / 2;
} else {
plane_hypotheses[center].w = filter[median_index];
}
}
__global__ void BlackPixelFilter(const Camera *cameras, float4 *plane_hypotheses, float *costs)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2;
} else {
p.y = p.y * 2 + 1;
}
CheckerboardFilter(cameras, plane_hypotheses, costs, p);
}
__global__ void RedPixelFilter(const Camera *cameras, float4 *plane_hypotheses, float *costs)
{
int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (threadIdx.x % 2 == 0) {
p.y = p.y * 2 + 1;
} else {
p.y = p.y * 2;
}
CheckerboardFilter(cameras, plane_hypotheses, costs, p);
}
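// Host-side pipeline: random initialization, max_iterations of alternating black/red
// checkerboard updates, conversion of hypotheses to depths and world-frame normals, a
// black/red median-filter pass, and copying the results back to the host.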
void ACMP::RunPatchMatch()
{
const int width = cameras[0].width;
const int height = cameras[0].height;
// std::cout << width << " " << height << std::endl;
int BLOCK_W = 32;
int BLOCK_H = (BLOCK_W / 2);
dim3 grid_size_randinit;
grid_size_randinit.x = (width + 16 - 1) / 16;
grid_size_randinit.y=(height + 16 - 1) / 16;
grid_size_randinit.z = 1;
dim3 block_size_randinit;
block_size_randinit.x = 16;
block_size_randinit.y = 16;
block_size_randinit.z = 1;
dim3 grid_size_checkerboard;
grid_size_checkerboard.x = (width + BLOCK_W - 1) / BLOCK_W;
grid_size_checkerboard.y= ( (height / 2) + BLOCK_H - 1) / BLOCK_H;
grid_size_checkerboard.z = 1;
dim3 block_size_checkerboard;
block_size_checkerboard.x = BLOCK_W;
block_size_checkerboard.y = BLOCK_H;
block_size_checkerboard.z = 1;
int max_iterations = params.max_iterations;
RandomInitialization<<<grid_size_randinit, block_size_randinit>>>(texture_objects_cuda, cameras_cuda, plane_hypotheses_cuda, costs_cuda, rand_states_cuda, selected_views_cuda, prior_planes_cuda, plane_masks_cuda, params);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
for (int i = 0; i < max_iterations; ++i) {
BlackPixelUpdate<<<grid_size_checkerboard, block_size_checkerboard>>>(texture_objects_cuda, texture_depths_cuda, cameras_cuda, plane_hypotheses_cuda, costs_cuda, rand_states_cuda, selected_views_cuda, prior_planes_cuda, plane_masks_cuda, params, i);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
RedPixelUpdate<<<grid_size_checkerboard, block_size_checkerboard>>>(texture_objects_cuda, texture_depths_cuda, cameras_cuda, plane_hypotheses_cuda, costs_cuda, rand_states_cuda, selected_views_cuda, prior_planes_cuda, plane_masks_cuda, params, i);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
printf("iteration: %d\n", i);
}
GetDepthandNormal<<<grid_size_randinit, block_size_randinit>>>(cameras_cuda, plane_hypotheses_cuda, params);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
BlackPixelFilter<<<grid_size_checkerboard, block_size_checkerboard>>>(cameras_cuda, plane_hypotheses_cuda, costs_cuda);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
RedPixelFilter<<<grid_size_checkerboard, block_size_checkerboard>>>(cameras_cuda, plane_hypotheses_cuda, costs_cuda);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
cudaMemcpy(plane_hypotheses_host, plane_hypotheses_cuda, sizeof(float4) * width * height, cudaMemcpyDeviceToHost);
cudaMemcpy(costs_host, costs_cuda, sizeof(float) * width * height, cudaMemcpyDeviceToHost);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
|
4c3eb2385a3bff698b43c736eba9fe2136b56a54.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <type_traits>
#include <mma.h>
#include <wmma_extension/wmma_extension.hpp>
constexpr std::size_t block_size = 256;
constexpr unsigned warp_size = 32;
#ifndef CUDA_ARCH_SM
#define CUDA_ARCH_SM 0
#endif
template <bool UseWMMAe>
__global__ void batched_direct_product_16x16(float* const c_ptr, const half* const u_ptr);
template <>
__global__ void batched_direct_product_16x16<true>(float* const c_ptr, const half* const u_ptr) {
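// One warp handles one 32-element vector: the four 16x16 WMMA tiles computed
// below assemble the 32x32 direct (outer) product of that vector with itself.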
constexpr unsigned DIM = 32;
constexpr unsigned FDIM = 16;
__shared__ half u_smem[block_size];
__shared__ float c_smem[block_size * DIM];
const unsigned warp_id = threadIdx.x >> 5;
half* const u_smem_ptr = u_smem + warp_id * DIM;
float* const c_smem_ptr = c_ptr + warp_id * DIM * DIM;
u_smem[threadIdx.x] = u_ptr[blockIdx.x * block_size + threadIdx.x];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, FDIM, FDIM, FDIM, half, nvcuda::wmma::col_major> a_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, FDIM, FDIM, FDIM, half, nvcuda::wmma::row_major> b_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, FDIM, FDIM, FDIM, float> c_frag[4];
mtk::wmma::load_vector(a_frag[0], u_smem_ptr);
mtk::wmma::load_vector(a_frag[1], u_smem_ptr + FDIM);
mtk::wmma::load_vector(b_frag[0], u_smem_ptr);
mtk::wmma::load_vector(b_frag[1], u_smem_ptr + FDIM);
nvcuda::wmma::fill_fragment(c_frag[0], 0.0f);
nvcuda::wmma::mma_sync(c_frag[0], a_frag[0], b_frag[0], c_frag[0]);
nvcuda::wmma::fill_fragment(c_frag[1], 0.0f);
nvcuda::wmma::mma_sync(c_frag[1], a_frag[1], b_frag[0], c_frag[1]);
nvcuda::wmma::fill_fragment(c_frag[2], 0.0f);
nvcuda::wmma::mma_sync(c_frag[2], a_frag[0], b_frag[1], c_frag[2]);
nvcuda::wmma::fill_fragment(c_frag[3], 0.0f);
nvcuda::wmma::mma_sync(c_frag[3], a_frag[1], b_frag[1], c_frag[3]);
nvcuda::wmma::store_matrix_sync(c_smem_ptr, c_frag[0], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM, c_frag[1], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM, c_frag[2], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM + FDIM, c_frag[3], DIM, nvcuda::wmma::mem_col_major);
for (unsigned i = 0; i < DIM * block_size; i+= block_size) {
c_ptr[blockIdx.x * DIM + threadIdx.x + i] = c_smem[threadIdx.x + i];
}
}
template <>
__global__ void batched_direct_product_16x16<false>(float* const c_ptr, const half* const u_ptr) {
constexpr unsigned DIM = 32;
constexpr unsigned FDIM = 16;
__shared__ half u_tmp_smem[block_size / warp_size * FDIM * FDIM];
__shared__ float c_smem[block_size * DIM];
const unsigned warp_id = threadIdx.x >> 5;
half* const u_smem_ptr = u_tmp_smem + warp_id * FDIM * FDIM;
float* const c_smem_ptr = c_ptr + warp_id * DIM * DIM;
for (std::size_t i = 0; i < FDIM * FDIM; i += warp_size) {
u_smem_ptr[i + threadIdx.x] = __float2half(0.0f);
}
u_tmp_smem[threadIdx.x] = u_ptr[blockIdx.x * block_size + threadIdx.x];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, FDIM, FDIM, FDIM, half, nvcuda::wmma::col_major> a_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, FDIM, FDIM, FDIM, half, nvcuda::wmma::row_major> b_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, FDIM, FDIM, FDIM, float> c_frag[4];
nvcuda::wmma::load_matrix_sync(a_frag[0], u_smem_ptr, DIM);
nvcuda::wmma::load_matrix_sync(a_frag[1], u_smem_ptr + FDIM, DIM);
nvcuda::wmma::load_matrix_sync(b_frag[0], u_smem_ptr, DIM);
nvcuda::wmma::load_matrix_sync(b_frag[1], u_smem_ptr + FDIM, DIM);
nvcuda::wmma::fill_fragment(c_frag[0], 0.0f);
nvcuda::wmma::mma_sync(c_frag[0], a_frag[0], b_frag[0], c_frag[0]);
nvcuda::wmma::fill_fragment(c_frag[1], 0.0f);
nvcuda::wmma::mma_sync(c_frag[1], a_frag[1], b_frag[0], c_frag[1]);
nvcuda::wmma::fill_fragment(c_frag[2], 0.0f);
nvcuda::wmma::mma_sync(c_frag[2], a_frag[0], b_frag[1], c_frag[2]);
nvcuda::wmma::fill_fragment(c_frag[3], 0.0f);
nvcuda::wmma::mma_sync(c_frag[3], a_frag[1], b_frag[1], c_frag[3]);
nvcuda::wmma::store_matrix_sync(c_smem_ptr, c_frag[0], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM, c_frag[1], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM, c_frag[2], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM + FDIM, c_frag[3], DIM, nvcuda::wmma::mem_col_major);
for (unsigned i = 0; i < DIM * block_size; i+= block_size) {
c_ptr[blockIdx.x * DIM + threadIdx.x + i] = c_smem[threadIdx.x + i];
}
}
template <bool UseWMMAe>
void test_batched_direct_product(const unsigned size_power) {
constexpr std::size_t C = 1lu << 6;
const unsigned batch_size = 1lu << size_power;
const std::size_t grid_size = batch_size / (block_size / warp_size);
half *dU;
float *dC;
hipMalloc(&dU, sizeof(half) * batch_size * warp_size);
hipMalloc(&dC, sizeof(float) * batch_size * warp_size * warp_size);
const auto start_clock = std::chrono::system_clock::now();
for (std::size_t c = 0; c < C; c++) {
hipLaunchKernelGGL(( batched_direct_product_16x16<UseWMMAe>), dim3(grid_size), dim3(block_size), 0, 0,
dC,
dU
);
}
const auto status = hipGetLastError();
hipDeviceSynchronize();
if (status != 0) {
std::fprintf(stderr, "%s\n", hipGetErrorString(status));
}
const auto end_clock = std::chrono::system_clock::now();
const auto elapsed_time = std::chrono::duration_cast<std::chrono::microseconds>(end_clock - start_clock).count() / 1.e6 / C;
std::printf("%u,%u,%u,%e\n",
static_cast<unsigned>(CUDA_ARCH_SM),
batch_size,
(UseWMMAe ? 1u : 0u),
elapsed_time
);
hipFree(dU);
hipFree(dC);
}
void test_batched_direct_product(const unsigned min_p, const unsigned max_p) {
std::printf("# %s\n", __func__);
std::printf("-- 1\n");
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<false>(i);
}
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<true>(i);
}
std::printf("-- 2\n");
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<false>(i);
}
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<true>(i);
}
}
int main() {
test_batched_direct_product(8, 20);
}
|
4c3eb2385a3bff698b43c736eba9fe2136b56a54.cu
|
#include <iostream>
#include <chrono>
#include <type_traits>
#include <mma.h>
#include <wmma_extension/wmma_extension.hpp>
constexpr std::size_t block_size = 256;
constexpr unsigned warp_size = 32;
#ifndef CUDA_ARCH_SM
#define CUDA_ARCH_SM 0
#endif
template <bool UseWMMAe>
__global__ void batched_direct_product_16x16(float* const c_ptr, const half* const u_ptr);
template <>
__global__ void batched_direct_product_16x16<true>(float* const c_ptr, const half* const u_ptr) {
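// One warp handles one 32-element vector: the four 16x16 WMMA tiles computed
// below assemble the 32x32 direct (outer) product of that vector with itself.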
constexpr unsigned DIM = 32;
constexpr unsigned FDIM = 16;
__shared__ half u_smem[block_size];
__shared__ float c_smem[block_size * DIM];
const unsigned warp_id = threadIdx.x >> 5;
half* const u_smem_ptr = u_smem + warp_id * DIM;
float* const c_smem_ptr = c_ptr + warp_id * DIM * DIM;
u_smem[threadIdx.x] = u_ptr[blockIdx.x * block_size + threadIdx.x];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, FDIM, FDIM, FDIM, half, nvcuda::wmma::col_major> a_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, FDIM, FDIM, FDIM, half, nvcuda::wmma::row_major> b_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, FDIM, FDIM, FDIM, float> c_frag[4];
mtk::wmma::load_vector(a_frag[0], u_smem_ptr);
mtk::wmma::load_vector(a_frag[1], u_smem_ptr + FDIM);
mtk::wmma::load_vector(b_frag[0], u_smem_ptr);
mtk::wmma::load_vector(b_frag[1], u_smem_ptr + FDIM);
nvcuda::wmma::fill_fragment(c_frag[0], 0.0f);
nvcuda::wmma::mma_sync(c_frag[0], a_frag[0], b_frag[0], c_frag[0]);
nvcuda::wmma::fill_fragment(c_frag[1], 0.0f);
nvcuda::wmma::mma_sync(c_frag[1], a_frag[1], b_frag[0], c_frag[1]);
nvcuda::wmma::fill_fragment(c_frag[2], 0.0f);
nvcuda::wmma::mma_sync(c_frag[2], a_frag[0], b_frag[1], c_frag[2]);
nvcuda::wmma::fill_fragment(c_frag[3], 0.0f);
nvcuda::wmma::mma_sync(c_frag[3], a_frag[1], b_frag[1], c_frag[3]);
nvcuda::wmma::store_matrix_sync(c_smem_ptr, c_frag[0], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM, c_frag[1], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM, c_frag[2], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM + FDIM, c_frag[3], DIM, nvcuda::wmma::mem_col_major);
for (unsigned i = 0; i < DIM * block_size; i+= block_size) {
c_ptr[blockIdx.x * DIM + threadIdx.x + i] = c_smem[threadIdx.x + i];
}
}
template <>
__global__ void batched_direct_product_16x16<false>(float* const c_ptr, const half* const u_ptr) {
constexpr unsigned DIM = 32;
constexpr unsigned FDIM = 16;
__shared__ half u_tmp_smem[block_size / warp_size * FDIM * FDIM];
__shared__ float c_smem[block_size * DIM];
const unsigned warp_id = threadIdx.x >> 5;
half* const u_smem_ptr = u_tmp_smem + warp_id * FDIM * FDIM;
float* const c_smem_ptr = c_ptr + warp_id * DIM * DIM;
for (std::size_t i = 0; i < FDIM * FDIM; i += warp_size) {
u_smem_ptr[i + threadIdx.x] = __float2half(0.0f);
}
u_tmp_smem[threadIdx.x] = u_ptr[blockIdx.x * block_size + threadIdx.x];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, FDIM, FDIM, FDIM, half, nvcuda::wmma::col_major> a_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, FDIM, FDIM, FDIM, half, nvcuda::wmma::row_major> b_frag[2];
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, FDIM, FDIM, FDIM, float> c_frag[4];
nvcuda::wmma::load_matrix_sync(a_frag[0], u_smem_ptr, DIM);
nvcuda::wmma::load_matrix_sync(a_frag[1], u_smem_ptr + FDIM, DIM);
nvcuda::wmma::load_matrix_sync(b_frag[0], u_smem_ptr, DIM);
nvcuda::wmma::load_matrix_sync(b_frag[1], u_smem_ptr + FDIM, DIM);
nvcuda::wmma::fill_fragment(c_frag[0], 0.0f);
nvcuda::wmma::mma_sync(c_frag[0], a_frag[0], b_frag[0], c_frag[0]);
nvcuda::wmma::fill_fragment(c_frag[1], 0.0f);
nvcuda::wmma::mma_sync(c_frag[1], a_frag[1], b_frag[0], c_frag[1]);
nvcuda::wmma::fill_fragment(c_frag[2], 0.0f);
nvcuda::wmma::mma_sync(c_frag[2], a_frag[0], b_frag[1], c_frag[2]);
nvcuda::wmma::fill_fragment(c_frag[3], 0.0f);
nvcuda::wmma::mma_sync(c_frag[3], a_frag[1], b_frag[1], c_frag[3]);
nvcuda::wmma::store_matrix_sync(c_smem_ptr, c_frag[0], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM, c_frag[1], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM, c_frag[2], DIM, nvcuda::wmma::mem_col_major);
nvcuda::wmma::store_matrix_sync(c_smem_ptr + FDIM * DIM + FDIM, c_frag[3], DIM, nvcuda::wmma::mem_col_major);
for (unsigned i = 0; i < DIM * block_size; i+= block_size) {
c_ptr[blockIdx.x * DIM + threadIdx.x + i] = c_smem[threadIdx.x + i];
}
}
template <bool UseWMMAe>
void test_batched_direct_product(const unsigned size_power) {
constexpr std::size_t C = 1lu << 6;
const unsigned batch_size = 1lu << size_power;
const std::size_t grid_size = batch_size / (block_size / warp_size);
half *dU;
float *dC;
cudaMalloc(&dU, sizeof(half) * batch_size * warp_size);
cudaMalloc(&dC, sizeof(float) * batch_size * warp_size * warp_size);
const auto start_clock = std::chrono::system_clock::now();
for (std::size_t c = 0; c < C; c++) {
batched_direct_product_16x16<UseWMMAe><<<grid_size, block_size>>>(
dC,
dU
);
}
const auto status = cudaGetLastError();
cudaDeviceSynchronize();
if (status != 0) {
std::fprintf(stderr, "%s\n", cudaGetErrorString(status));
}
const auto end_clock = std::chrono::system_clock::now();
const auto elapsed_time = std::chrono::duration_cast<std::chrono::microseconds>(end_clock - start_clock).count() / 1.e6 / C;
std::printf("%u,%u,%u,%e\n",
static_cast<unsigned>(CUDA_ARCH_SM),
batch_size,
(UseWMMAe ? 1u : 0u),
elapsed_time
);
cudaFree(dU);
cudaFree(dC);
}
void test_batched_direct_product(const unsigned min_p, const unsigned max_p) {
std::printf("# %s\n", __func__);
std::printf("-- 1\n");
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<false>(i);
}
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<true>(i);
}
std::printf("-- 2\n");
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<false>(i);
}
for (unsigned i = min_p; i <= max_p; i++) {
test_batched_direct_product<true>(i);
}
}
int main() {
test_batched_direct_product(8, 20);
}
|
6bec640f8acf635a27952e4cbec0a59dc1c99168.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <math_constants.h>
extern "C"
{
__global__ void
rtruncnorm_kernel(float *x, int n,
float *mu, float *sigma,
float *lo, float *hi,
int maxtries, int rng_a,
int rng_b, int rng_c)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < n){
// Setup the RNG:
hiprandState_t rng;
hiprand_init(rng_a+rng_b*idx,rng_c,0,&rng);
// Draw sample
int ntries = 0;
int accepted = 0;
// Declare ran outside the loop so the accepted draw is still in scope below.
float ran = 0.0f;
// Rejection sampling: keep drawing from N(mu, sigma) until the draw lands in
// [lo, hi] or maxtries is exhausted.
while(!accepted and ntries < maxtries){
ran = hiprand_normal(&rng);
ran = mu[idx]+ran*sigma[idx];
ntries += 1;
if(ran >= lo[idx] and ran <= hi[idx]){
accepted = 1;
}
}
// Store sample:
x[idx] = ran;
}
return;
}
} // END extern "C"
|
6bec640f8acf635a27952e4cbec0a59dc1c99168.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
__global__ void
rtruncnorm_kernel(float *x, int n,
float *mu, float *sigma,
float *lo, float *hi,
int maxtries, int rng_a,
int rng_b, int rng_c)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < n){
// Setup the RNG:
curandState rng;
curand_init(rng_a+rng_b*idx,rng_c,0,&rng);
// Draw sample
int ntries = 0;
int accepted = 0;
// Declare ran outside the loop so the accepted draw is still in scope below.
float ran = 0.0f;
// Rejection sampling: keep drawing from N(mu, sigma) until the draw lands in
// [lo, hi] or maxtries is exhausted.
while(!accepted and ntries < maxtries){
ran = curand_normal(&rng);
ran = mu[idx]+ran*sigma[idx];
ntries += 1;
if(ran >= lo[idx] and ran <= hi[idx]){
accepted = 1;
}
}
// Store sample:
x[idx] = ran;
}
return;
}
} // END extern "C"
|
1a081ea31e0036f5ac5d83f65bc77ffb2b47ef11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_image2D1C_ConvolveRow.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *img = NULL;
hipMalloc(&img, XSIZE*YSIZE);
int n_x = 1;
int n_y = 1;
short k = 1;
float *kernel = NULL;
hipMalloc(&kernel, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernel_image2D1C_ConvolveRow), dim3(gridBlock), dim3(threadBlock), 0, 0, img, n_x, n_y, k, kernel, out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_image2D1C_ConvolveRow), dim3(gridBlock), dim3(threadBlock), 0, 0, img, n_x, n_y, k, kernel, out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_image2D1C_ConvolveRow), dim3(gridBlock), dim3(threadBlock), 0, 0, img, n_x, n_y, k, kernel, out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1a081ea31e0036f5ac5d83f65bc77ffb2b47ef11.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_image2D1C_ConvolveRow.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *img = NULL;
cudaMalloc(&img, XSIZE*YSIZE);
int n_x = 1;
int n_y = 1;
short k = 1;
float *kernel = NULL;
cudaMalloc(&kernel, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_image2D1C_ConvolveRow<<<gridBlock,threadBlock>>>(img,n_x,n_y,k,kernel,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_image2D1C_ConvolveRow<<<gridBlock,threadBlock>>>(img,n_x,n_y,k,kernel,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_image2D1C_ConvolveRow<<<gridBlock,threadBlock>>>(img,n_x,n_y,k,kernel,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5e417f9d19410e4dea497a3ebe75b86c21c6a12c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) {
// x (gradO) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
// z (gradI) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
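// Each gradI element accumulates the gradO values over the
// factorD x factorH x factorW block it was upsampled to.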
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, dimID;
__shared__ uint factorD, factorH, factorW;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
dimID = isNCDHW ? 2 : 1;
zLen = shape::length(zShapeInfo);
rank = 5;
factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1];
factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2];
factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3];
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
z[zOffset] = 0;
const Nd4jLong zCoord2 = coords[dimID] * factorD;
const Nd4jLong zCoord3 = coords[dimID + 1] * factorH;
const Nd4jLong zCoord4 = coords[dimID + 2] * factorW;
for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID])
for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1])
for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2])
z[zOffset] += x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCDHW) {
hipLaunchKernelGGL(( upsampling3dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void ConvolutionUtils::upsampling3dBP(sd::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) {
PointersManager manager(block.launchContext(), "upsampling3d_bp");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
}
}
|
5e417f9d19410e4dea497a3ebe75b86c21c6a12c.cu
|
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) {
// x (gradO) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
// z (gradI) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
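// Each gradI element accumulates the gradO values over the
// factorD x factorH x factorW block it was upsampled to.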
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, dimID;
__shared__ uint factorD, factorH, factorW;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
dimID = isNCDHW ? 2 : 1;
zLen = shape::length(zShapeInfo);
rank = 5;
factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1];
factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2];
factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3];
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
z[zOffset] = 0;
const Nd4jLong zCoord2 = coords[dimID] * factorD;
const Nd4jLong zCoord3 = coords[dimID + 1] * factorH;
const Nd4jLong zCoord4 = coords[dimID + 2] * factorW;
for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID])
for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1])
for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2])
z[zOffset] += x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCDHW) {
upsampling3dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void ConvolutionUtils::upsampling3dBP(sd::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) {
PointersManager manager(block.launchContext(), "upsampling3d_bp");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
}
}
|
cc777187ff65a76d3a7b014a9cbc09daaed2aa11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <mpi.h>
#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"
// 2D diffusion example with mpi
// the grid has a fixed width of nx=128
// the user specifies the height, ny, as a power of two
// note that nx and ny have 2 added to them to account for halos
//
// the domain decomposition is in the vertical
// ny is the height of the local sub-domain
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank);
template <typename T>
void fill_gpu(T *v, T value, int n);
__global__
void diffusion(double *x0, double *x1, int nx, int ny, double dt) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
if(i<nx-2 && j<ny-2) {
auto pos = i+1 + (j+1)*nx;
x1[pos] = x0[pos] + dt * (-4.*x0[pos]
+ x0[pos-nx] + x0[pos+nx]
+ x0[pos-1] + x0[pos+1]);
}
}
int main(int argc, char** argv) {
// set up parameters
// first argument is the y dimension = 2^arg
size_t pow = read_arg(argc, argv, 1, 8);
// second argument is the number of time steps
size_t nsteps = read_arg(argc, argv, 2, 100);
// set domain size
size_t nx = 128;
size_t ny = 1 << pow;
double dt = 0.1;
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
// calculate global domain sizes
if(ny%mpi_size) {
std::cout << "error : global domain dimension " << ny
<< "must be divisible by number of MPI ranks " << mpi_size
<< std::endl;
exit(1);
}
else if(mpi_rank==0) {
std::cout << "\n## " << mpi_size << " MPI ranks" << std::endl;
std::cout << "## " << nx << "x" << ny
<< " : " << nx << "x" << ny/mpi_size << " per rank"
<< " for " << nsteps << " time steps"
<< " (" << nx*ny << " grid points)"
<< std::endl;
}
ny /= mpi_size;
// adjust dimensions for halo
nx += 2;
ny += 2;
// allocate memory on device and host
// note : allocate enough memory for the halo around the boundary
auto buffer_size = nx*ny;
double *x_host = malloc_host_pinned<double>(buffer_size);
double *x0 = malloc_device<double>(buffer_size);
double *x1 = malloc_device<double>(buffer_size);
// set initial conditions of 0 everywhere
fill_gpu(x0, 0., buffer_size);
fill_gpu(x1, 0., buffer_size);
// set boundary conditions of 1 on south border
if(mpi_rank==0) { // south boundary
fill_gpu(x0, 1., nx);
fill_gpu(x1, 1., nx);
}
if(mpi_rank==mpi_size-1) { // north boundary
fill_gpu(x0+nx*(ny-1), 1., nx);
fill_gpu(x1+nx*(ny-1), 1., nx);
}
CudaStream stream;
CudaStream copy_stream(true);
auto start_event = stream.enqueue_event();
dim3 block_dim(8,8);
dim3 grid_dim((nx-2)/block_dim.x, (ny-2)/block_dim.y);
auto south = mpi_rank - 1;
auto north = mpi_rank + 1;
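// Each step exchanges one halo row with the south and north neighbours and
// then applies the 5-point diffusion stencil to the interior of x0, writing
// the result into x1; the two buffers are swapped at the end of the step.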
// time stepping loop
for(auto step=0; step<nsteps; ++step) {
MPI_Request requests[4];
MPI_Status statuses[4];
auto num_requests = 0;
// exchange with south
if(south>=0) {
// x0(:, 0) <- south
MPI_Irecv(x0, nx, MPI_DOUBLE, south, 0, MPI_COMM_WORLD, &requests[0]);
// x0(:, 1) -> south
MPI_Isend(x0+nx, nx, MPI_DOUBLE, south, 0, MPI_COMM_WORLD, &requests[1]);
num_requests+=2;
}
// exchange with north
if(north<mpi_size) {
// x0(:, ny-1) <- north
MPI_Irecv(x0+(ny-1)*nx, nx, MPI_DOUBLE, north, 0, MPI_COMM_WORLD, &requests[num_requests]);
// x0(:, ny-2) -> north
MPI_Isend(x0+(ny-2)*nx, nx, MPI_DOUBLE, north, 0, MPI_COMM_WORLD, &requests[num_requests+1]);
num_requests+=2;
}
MPI_Waitall(num_requests, requests, statuses);
// TODO copy in the kernel launch from diffusion2d.cu
hipLaunchKernelGGL(( diffusion), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, nx, ny, dt);
std::swap(x0, x1);
}
auto stop_event = stream.enqueue_event();
stop_event.wait();
copy_to_host<double>(x0, x_host, buffer_size);
double time = stop_event.time_since(start_event);
if(mpi_rank==0) {
std::cout << "## " << time << "s, "
<< nsteps*(nx-2)*(ny-2)*mpi_size / time << " points/second"
<< std::endl << std::endl;
std::cout << "writing to output.bin/bov" << std::endl;
}
write_to_file(nx, ny, x_host, mpi_size, mpi_rank);
MPI_Finalize();
return 0;
}
template <typename T>
__global__
void fill(T *v, T value, int n) {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid<n) {
v[tid] = value;
}
}
template <typename T>
void fill_gpu(T *v, T value, int n) {
auto block_dim = 192ul;
auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0);
hipLaunchKernelGGL(( fill<T>), dim3(grid_dim), dim3(block_dim), 0, 0, v, value, n);
}
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank) {
// collect the global solution to the root rank
auto block_size = nx*(ny-2); // discard first and last rows
std::vector<double> data_global(mpi_size*block_size);
MPI_Gather(data+nx, block_size, MPI_DOUBLE,
&data_global[0], block_size, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(mpi_rank==0) {
FILE* output = fopen("output.bin", "w");
fwrite(&data_global[0], sizeof(double), mpi_size* nx * (ny-2), output);
fclose(output);
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << nx << ", " << mpi_size*(ny-2) << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl;
}
}
|
cc777187ff65a76d3a7b014a9cbc09daaed2aa11.cu
|
#include <fstream>
#include <iostream>
#include <vector>
#include <cuda.h>
#include <mpi.h>
#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"
// 2D diffusion example with mpi
// the grid has a fixed width of nx=128
// the user specifies the height, ny, as a power of two
// note that nx and ny have 2 added to them to account for halos
//
// the domain decomposition is in the vertical
// ny is the height of the local sub-domain
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank);
template <typename T>
void fill_gpu(T *v, T value, int n);
__global__
void diffusion(double *x0, double *x1, int nx, int ny, double dt) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
if(i<nx-2 && j<ny-2) {
auto pos = i+1 + (j+1)*nx;
x1[pos] = x0[pos] + dt * (-4.*x0[pos]
+ x0[pos-nx] + x0[pos+nx]
+ x0[pos-1] + x0[pos+1]);
}
}
int main(int argc, char** argv) {
// set up parameters
// first argument is the y dimension = 2^arg
size_t pow = read_arg(argc, argv, 1, 8);
// second argument is the number of time steps
size_t nsteps = read_arg(argc, argv, 2, 100);
// set domain size
size_t nx = 128;
size_t ny = 1 << pow;
double dt = 0.1;
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
// calculate global domain sizes
if(ny%mpi_size) {
std::cout << "error : global domain dimension " << ny
<< "must be divisible by number of MPI ranks " << mpi_size
<< std::endl;
exit(1);
}
else if(mpi_rank==0) {
std::cout << "\n## " << mpi_size << " MPI ranks" << std::endl;
std::cout << "## " << nx << "x" << ny
<< " : " << nx << "x" << ny/mpi_size << " per rank"
<< " for " << nsteps << " time steps"
<< " (" << nx*ny << " grid points)"
<< std::endl;
}
ny /= mpi_size;
// adjust dimensions for halo
nx += 2;
ny += 2;
// allocate memory on device and host
// note : allocate enough memory for the halo around the boundary
auto buffer_size = nx*ny;
double *x_host = malloc_host_pinned<double>(buffer_size);
double *x0 = malloc_device<double>(buffer_size);
double *x1 = malloc_device<double>(buffer_size);
// set initial conditions of 0 everywhere
fill_gpu(x0, 0., buffer_size);
fill_gpu(x1, 0., buffer_size);
// set boundary conditions of 1 on south border
if(mpi_rank==0) { // south boundary
fill_gpu(x0, 1., nx);
fill_gpu(x1, 1., nx);
}
if(mpi_rank==mpi_size-1) { // north boundary
fill_gpu(x0+nx*(ny-1), 1., nx);
fill_gpu(x1+nx*(ny-1), 1., nx);
}
CudaStream stream;
CudaStream copy_stream(true);
auto start_event = stream.enqueue_event();
dim3 block_dim(8,8);
dim3 grid_dim((nx-2)/block_dim.x, (ny-2)/block_dim.y);
auto south = mpi_rank - 1;
auto north = mpi_rank + 1;
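// Each step exchanges one halo row with the south and north neighbours and
// then applies the 5-point diffusion stencil to the interior of x0, writing
// the result into x1; the two buffers are swapped at the end of the step.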
// time stepping loop
for(auto step=0; step<nsteps; ++step) {
MPI_Request requests[4];
MPI_Status statuses[4];
auto num_requests = 0;
// exchange with south
if(south>=0) {
// x0(:, 0) <- south
MPI_Irecv(x0, nx, MPI_DOUBLE, south, 0, MPI_COMM_WORLD, &requests[0]);
// x0(:, 1) -> south
MPI_Isend(x0+nx, nx, MPI_DOUBLE, south, 0, MPI_COMM_WORLD, &requests[1]);
num_requests+=2;
}
// exchange with north
if(north<mpi_size) {
// x0(:, ny-1) <- north
MPI_Irecv(x0+(ny-1)*nx, nx, MPI_DOUBLE, north, 0, MPI_COMM_WORLD, &requests[num_requests]);
// x0(:, ny-2) -> north
MPI_Isend(x0+(ny-2)*nx, nx, MPI_DOUBLE, north, 0, MPI_COMM_WORLD, &requests[num_requests+1]);
num_requests+=2;
}
MPI_Waitall(num_requests, requests, statuses);
// TODO copy in the kernel launch from diffusion2d.cu
diffusion<<<grid_dim, block_dim>>>(x0, x1, nx, ny, dt);
std::swap(x0, x1);
}
auto stop_event = stream.enqueue_event();
stop_event.wait();
copy_to_host<double>(x0, x_host, buffer_size);
double time = stop_event.time_since(start_event);
if(mpi_rank==0) {
std::cout << "## " << time << "s, "
<< nsteps*(nx-2)*(ny-2)*mpi_size / time << " points/second"
<< std::endl << std::endl;
std::cout << "writing to output.bin/bov" << std::endl;
}
write_to_file(nx, ny, x_host, mpi_size, mpi_rank);
MPI_Finalize();
return 0;
}
template <typename T>
__global__
void fill(T *v, T value, int n) {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid<n) {
v[tid] = value;
}
}
template <typename T>
void fill_gpu(T *v, T value, int n) {
auto block_dim = 192ul;
auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0);
fill<T><<<grid_dim, block_dim>>>(v, value, n);
}
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank) {
// collect the global solution to the root rank
auto block_size = nx*(ny-2); // discard first and last rows
std::vector<double> data_global(mpi_size*block_size);
MPI_Gather(data+nx, block_size, MPI_DOUBLE,
&data_global[0], block_size, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(mpi_rank==0) {
FILE* output = fopen("output.bin", "w");
fwrite(&data_global[0], sizeof(double), mpi_size* nx * (ny-2), output);
fclose(output);
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << nx << ", " << mpi_size*(ny-2) << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl;
}
}
|
7e8d17fd754da4a6199429e8cf79534ad2f576c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime_api.h"
/* Scratch space positions */
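/* The scratch buffer is laid out as seven consecutive max_ns-float blocks:
 * lower/mid/upper delta coefficients, lower/mid/upper gamma coefficients,
 * and a result vector starting at matrix_equal_pos. */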
const int max_ns = 1024;
const int lower_delta_pos = 0;
const int mid_delta_pos = lower_delta_pos + max_ns;
const int upper_delta_pos = mid_delta_pos + max_ns;
const int lower_gamma_pos = upper_delta_pos + max_ns;
const int mid_gamma_pos = lower_gamma_pos + max_ns;
const int upper_gamma_pos = mid_gamma_pos + max_ns;
const int matrix_equal_pos = upper_gamma_pos + max_ns;
const int scratch_space_size = matrix_equal_pos + max_ns;
void print_cuda_error(hipError_t err, char *at)
{
if (err)
{
printf("Error from CUDA at : %s\n", at);
printf("Message: %s\n", hipGetErrorString(err));
}
}
__device__ float call_payoff(const float s, const float k)
{
return fmaxf(0.0f, s - k);
}
__device__ void get_coeffs(const float *const grid, float *const scratch, const int ns, const int i)
{
/* Difference vs. the grid below */
float d0;
float d1;
if (i == 0)
{
d0 = grid[1] - grid[0];
d1 = grid[2] - grid[1];
}
else if (i == (ns - 1))
{
d0 = grid[i - 1] - grid[i - 2];
d1 = grid[i] - grid[i - 1];
}
else
{
d0 = grid[i] - grid[i - 1];
d1 = grid[i + 1] - grid[i];
}
const float d1_p_d2 = d0 + d1;
/* Delta coeffs */
/* Middle */
if ((i != 0) & (i != (ns - 1)))
{
scratch[lower_delta_pos + i] = -d1 / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = (d1 - d0) / (d0 * d1);
scratch[upper_delta_pos + i] = d0 / (d1 * d1_p_d2);
}
/* Lower boundary */
else if (i == 0)
{
scratch[lower_delta_pos + i] = (-2.0f * d0 - d1) / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = d1_p_d2 / (d0 * d1);
scratch[upper_delta_pos + i] = -d0 / (d1 * d1_p_d2);
}
/* Upper boundary */
else if (i == (ns - 1))
{
scratch[lower_delta_pos + i] = d1 / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = (-d0 - d1) / (d0 * d1);
scratch[upper_delta_pos + i] = (d0 + 2.0f * d1) / (d1 * d1_p_d2);
}
/* Gamma coeffs */
/* Middle */
if ((i != 0) & (i != (ns - 1)))
{
scratch[lower_gamma_pos + i] = 2.0f / (d0 * d1_p_d2);
scratch[mid_gamma_pos + i] = -2.0f / (d0 * d1);
scratch[upper_gamma_pos + i] = 2.0f / (d1 * d1_p_d2);
}
__syncthreads();
}
/* Populate the matrix */
__device__ void explicit_step(float *const scratch, float *const matrix_equal, const float *const tp1, const float *const grid,
const float half_sigma_sq, const float r, const float t_inc, const int ns, const int i)
{
/* Boundary conditions */
/* s = 0.0 */
if (i == 0)
{
const float b = -r * t_inc;
matrix_equal[0] = (1.0f + b) * tp1[0];
}
/* s = s_max*/
else if (i == (ns - 1))
{
const float r_s = r * grid[ns - 1];
const float a = -r_s * t_inc;
const float b = -(r - r_s) * t_inc;
matrix_equal[ns - 1] = a * tp1[ns - 2];
matrix_equal[ns - 1] += (1.0f + b) * tp1[ns - 1];
}
else if (i < ns)
{
const float g = half_sigma_sq * grid[i] * grid[i];
const float r_s = r * grid[i];
const float a = ((scratch[lower_delta_pos + i] * r_s) + (scratch[lower_gamma_pos + i] * g)) * t_inc;
const float b = ((scratch[mid_delta_pos + i] * r_s) + (scratch[mid_gamma_pos + i] * g) - r) * t_inc;
const float c = ((scratch[upper_delta_pos + i] * r_s) + (scratch[upper_gamma_pos + i] * g)) * t_inc;
matrix_equal[i] = a * tp1[i - 1];
matrix_equal[i] += (1.0f + b) * tp1[i];
matrix_equal[i] += c * tp1[i + 1];
}
__syncthreads();
}
__global__ void explicit_scheme(const float *const grid, float *const scratch, const float half_sigma_sq, const float r,
const float t_inc, const float k, const int ns, const int nt)
{
const int i = threadIdx.x;
if (ns & 0x1f)
{
/* Only multiple of 32 space steps are supported */
return;
}
/* Move grid to shared memory, needed for off by 1 access and reused */
__shared__ float shared_equal[max_ns];
shared_equal[i] = grid[i];
__syncthreads();
/* Build grid-based coeffs, completely parallel */
__shared__ float shared_tp1[max_ns];
shared_tp1[i] = call_payoff(shared_equal[i], k);
get_coeffs(shared_equal, scratch, ns, i);
/* Solve back in time */
for (unsigned int j = 0; j < nt >> 1; ++j)
{
explicit_step(scratch, shared_equal, shared_tp1, grid, half_sigma_sq, r, t_inc, ns, i);
shared_equal[i] = fmaxf(shared_equal[i], call_payoff(shared_equal[i], k));
explicit_step(scratch, shared_tp1, shared_equal, grid, half_sigma_sq, r, t_inc, ns, i);
shared_tp1[i] = fmaxf(shared_tp1[i], call_payoff(shared_tp1[i], k));
}
scratch[matrix_equal_pos + i] = shared_tp1[i];
}
void american_call_test()
{
/* Pricing set up */
printf("Pricing American Call\n");
const unsigned int ns = 1024; /* Want multiples of warp size (32) */
const float s = 100.0f;
const float r = 0.05f;
const float sigma = 0.2f;
const float half_sigma_sq = 0.5f * sigma * sigma;
const float k = 100.0f;
const float t = 1.0f;
const float t_inc = 0.9f / (static_cast<float>(ns * ns) * sigma * sigma);
const int nt = static_cast<int>(t / t_inc) + 1;
/* Build regular grid based at 0 */
float *grid = new float [ns];
const float s_inc = (s * 3.0f) / ns;
for (unsigned int i = 0; i < ns; ++i)
{
grid[i] = i * s_inc;
}
print_cuda_error(hipSetDevice(0), "Set device");
/* Prepare device memory */
float *dev_grid;
print_cuda_error(hipMalloc((void **)&dev_grid, ns * sizeof(float)), "Malloc grid");
print_cuda_error(hipMemcpy(dev_grid, grid, ns * sizeof(float), hipMemcpyHostToDevice), "Copy grid to device");
float *dev_scratch;
print_cuda_error(hipMalloc((void **)&dev_scratch, scratch_space_size * sizeof(float)), "Malloc scratch");
print_cuda_error(hipMemset(dev_scratch, 0, scratch_space_size * sizeof(float)), "Clear scratch");
/* Call kernels */
hipProfilerStart();
hipLaunchKernelGGL(( explicit_scheme), dim3(1), dim3(ns), 0, 0, dev_grid, dev_scratch, half_sigma_sq, r, t_inc, k, ns, nt);
hipProfilerStop();
print_cuda_error(hipGetLastError(), "Kernel execution");
float *res = new float [ns];
print_cuda_error(hipMemcpy(res, &dev_scratch[matrix_equal_pos], ns * sizeof(float), hipMemcpyDeviceToHost), "Copy grid to host");
for (unsigned int i = 0; i < ns; ++i)
{
printf("%.2f: %.2f\n", grid[i], res[i]);
}
/* Clean up */
print_cuda_error(hipFree(dev_grid), "Free grid");
print_cuda_error(hipFree(dev_scratch), "Free scratch");
print_cuda_error(hipDeviceReset(), "Device reset");
delete [] grid;
delete [] res;
}
int main()
{
american_call_test();
return 0;
}
|
7e8d17fd754da4a6199429e8cf79534ad2f576c1.cu
|
#include <stdio.h>
#include "cuda_profiler_api.h"
/* Scratch space positions */
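/* The scratch buffer is laid out as seven consecutive max_ns-float blocks:
 * lower/mid/upper delta coefficients, lower/mid/upper gamma coefficients,
 * and a result vector starting at matrix_equal_pos. */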
const int max_ns = 1024;
const int lower_delta_pos = 0;
const int mid_delta_pos = lower_delta_pos + max_ns;
const int upper_delta_pos = mid_delta_pos + max_ns;
const int lower_gamma_pos = upper_delta_pos + max_ns;
const int mid_gamma_pos = lower_gamma_pos + max_ns;
const int upper_gamma_pos = mid_gamma_pos + max_ns;
const int matrix_equal_pos = upper_gamma_pos + max_ns;
const int scratch_space_size = matrix_equal_pos + max_ns;
void print_cuda_error(cudaError_t err, char *at)
{
if (err)
{
printf("Error from CUDA at : %s\n", at);
printf("Message: %s\n", cudaGetErrorString(err));
}
}
__device__ float call_payoff(const float s, const float k)
{
return fmaxf(0.0f, s - k);
}
__device__ void get_coeffs(const float *const grid, float *const scratch, const int ns, const int i)
{
/* Difference vs. the grid below */
float d0;
float d1;
if (i == 0)
{
d0 = grid[1] - grid[0];
d1 = grid[2] - grid[1];
}
else if (i == (ns - 1))
{
d0 = grid[i - 1] - grid[i - 2];
d1 = grid[i] - grid[i - 1];
}
else
{
d0 = grid[i] - grid[i - 1];
d1 = grid[i + 1] - grid[i];
}
const float d1_p_d2 = d0 + d1;
/* Delta coeffs */
/* Middle */
if ((i != 0) & (i != (ns - 1)))
{
scratch[lower_delta_pos + i] = -d1 / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = (d1 - d0) / (d0 * d1);
scratch[upper_delta_pos + i] = d0 / (d1 * d1_p_d2);
}
/* Lower boundary */
else if (i == 0)
{
scratch[lower_delta_pos + i] = (-2.0f * d0 - d1) / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = d1_p_d2 / (d0 * d1);
scratch[upper_delta_pos + i] = -d0 / (d1 * d1_p_d2);
}
/* Upper boundary */
else if (i == (ns - 1))
{
scratch[lower_delta_pos + i] = d1 / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = (-d0 - d1) / (d0 * d1);
scratch[upper_delta_pos + i] = (d0 + 2.0f * d1) / (d1 * d1_p_d2);
}
/* Gamma coeffs */
/* Middle */
if ((i != 0) & (i != (ns - 1)))
{
scratch[lower_gamma_pos + i] = 2.0f / (d0 * d1_p_d2);
scratch[mid_gamma_pos + i] = -2.0f / (d0 * d1);
scratch[upper_gamma_pos + i] = 2.0f / (d1 * d1_p_d2);
}
__syncthreads();
}
/* Populate the matrix */
__device__ void explicit_step(float *const scratch, float *const matrix_equal, const float *const tp1, const float *const grid,
const float half_sigma_sq, const float r, const float t_inc, const int ns, const int i)
{
/* Boundary conditions */
/* s = 0.0 */
if (i == 0)
{
const float b = -r * t_inc;
matrix_equal[0] = (1.0f + b) * tp1[0];
}
/* s = s_max*/
else if (i == (ns - 1))
{
const float r_s = r * grid[ns - 1];
const float a = -r_s * t_inc;
const float b = -(r - r_s) * t_inc;
matrix_equal[ns - 1] = a * tp1[ns - 2];
matrix_equal[ns - 1] += (1.0f + b) * tp1[ns - 1];
}
else if (i < ns)
{
const float g = half_sigma_sq * grid[i] * grid[i];
const float r_s = r * grid[i];
const float a = ((scratch[lower_delta_pos + i] * r_s) + (scratch[lower_gamma_pos + i] * g)) * t_inc;
const float b = ((scratch[mid_delta_pos + i] * r_s) + (scratch[mid_gamma_pos + i] * g) - r) * t_inc;
const float c = ((scratch[upper_delta_pos + i] * r_s) + (scratch[upper_gamma_pos + i] * g)) * t_inc;
matrix_equal[i] = a * tp1[i - 1];
matrix_equal[i] += (1.0f + b) * tp1[i];
matrix_equal[i] += c * tp1[i + 1];
}
__syncthreads();
}
__global__ void explicit_scheme(const float *const grid, float *const scratch, const float half_sigma_sq, const float r,
const float t_inc, const float k, const int ns, const int nt)
{
const int i = threadIdx.x;
if (ns & 0x1f)
{
/* Only multiple of 32 space steps are supported */
return;
}
/* Move grid to shared memory, needed for off by 1 access and reused */
__shared__ float shared_equal[max_ns];
shared_equal[i] = grid[i];
__syncthreads();
/* Build grid-based coeffs, completely parallel */
__shared__ float shared_tp1[max_ns];
shared_tp1[i] = call_payoff(shared_equal[i], k);
get_coeffs(shared_equal, scratch, ns, i);
/* Solve back in time */
for (unsigned int j = 0; j < nt >> 1; ++j)
{
explicit_step(scratch, shared_equal, shared_tp1, grid, half_sigma_sq, r, t_inc, ns, i);
shared_equal[i] = fmaxf(shared_equal[i], call_payoff(shared_equal[i], k));
explicit_step(scratch, shared_tp1, shared_equal, grid, half_sigma_sq, r, t_inc, ns, i);
shared_tp1[i] = fmaxf(shared_tp1[i], call_payoff(shared_tp1[i], k));
}
scratch[matrix_equal_pos + i] = shared_tp1[i];
}
void american_call_test()
{
/* Pricing set up */
printf("Pricing American Call\n");
const unsigned int ns = 1024; /* Want multiples of warp size (32) */
const float s = 100.0f;
const float r = 0.05f;
const float sigma = 0.2f;
const float half_sigma_sq = 0.5f * sigma * sigma;
const float k = 100.0f;
const float t = 1.0f;
const float t_inc = 0.9f / (static_cast<float>(ns * ns) * sigma * sigma);
const int nt = static_cast<int>(t / t_inc) + 1;
/* Build regular grid based at 0 */
float *grid = new float [ns];
const float s_inc = (s * 3.0f) / ns;
for (unsigned int i = 0; i < ns; ++i)
{
grid[i] = i * s_inc;
}
print_cuda_error(cudaSetDevice(0), "Set device");
/* Prepare device memory */
float *dev_grid;
print_cuda_error(cudaMalloc((void **)&dev_grid, ns * sizeof(float)), "Malloc grid");
print_cuda_error(cudaMemcpy(dev_grid, grid, ns * sizeof(float), cudaMemcpyHostToDevice), "Copy grid to device");
float *dev_scratch;
print_cuda_error(cudaMalloc((void **)&dev_scratch, scratch_space_size * sizeof(float)), "Malloc scratch");
print_cuda_error(cudaMemset(dev_scratch, 0, scratch_space_size * sizeof(float)), "Clear scratch");
/* Call kernels */
cudaProfilerStart();
explicit_scheme<<<1, ns>>>(dev_grid, dev_scratch, half_sigma_sq, r, t_inc, k, ns, nt);
cudaProfilerStop();
print_cuda_error(cudaGetLastError(), "Kernel execution");
float *res = new float [ns];
print_cuda_error(cudaMemcpy(res, &dev_scratch[matrix_equal_pos], ns * sizeof(float), cudaMemcpyDeviceToHost), "Copy grid to host");
for (unsigned int i = 0; i < ns; ++i)
{
printf("%.2f: %.2f\n", grid[i], res[i]);
}
/* Clean up */
print_cuda_error(cudaFree(dev_grid), "Free grid");
print_cuda_error(cudaFree(dev_scratch), "Free scratch");
print_cuda_error(cudaDeviceReset(), "Device reset");
delete [] grid;
delete [] res;
}
int main()
{
american_call_test();
return 0;
}
|
b362205b55b18f5acb20923d4511cda821fa43e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ __forceinline__ size_t gpu_scalar_index(unsigned int x, unsigned int y)
{
return NX*y+x;
}
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
return (2*RAD + nThreads)*y + x;
}
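// One relaxation sweep of the 2D Poisson problem: each thread stages its node
// plus a RAD-wide halo in shared memory, averages the four neighbours together
// with the charge source term, and records |phi_new - phi_old| in R.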
__global__ void gpu_poisson(double *c, double *fi,double *R){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int s_y = threadIdx.y + RAD;
unsigned int s_x = threadIdx.x + RAD;
unsigned int xp1 = (x + blockDim.x) % NX;
unsigned int yp1 = (y + blockDim.y) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
__shared__ double s_in[(2*RAD + nThreads)*3];
// load to shared memory (regular cells)
s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
// load halo cells
if (threadIdx.x < RAD) {
s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
}
if (threadIdx.y < RAD) {
s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
}
// Boundary conditions
if (y == 0) {
fi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY - 1) {
fi[gpu_scalar_index(x, y)] = 0.0;
return;
}
__syncthreads();
double charge = c[gpu_scalar_index(x, y)];
//double phi = fi[gpu_scalar_index(x, y)];
//double phiL = fi[gpu_scalar_index(xm1, y)];
//double phiR = fi[gpu_scalar_index(xp1, y)];
//double phiU = fi[gpu_scalar_index(x, yp1)];
//double phiD = fi[gpu_scalar_index(x, ym1)];
double phi = s_in[gpu_s_scalar_index(s_x, s_y)];
double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
double source = (charge / eps) * dx *dx; // Right hand side of the equation
double phi_old = phi;
phi = 0.25 * (phiL + phiR + phiU + phiD + source);
// Record the error
R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
//__syncthreads();
fi[gpu_scalar_index(x, y)] = phi;
//if (x == 5 && y == 5) printf("%g\n", phi);
}
|
b362205b55b18f5acb20923d4511cda821fa43e1.cu
|
#include "includes.h"
__device__ __forceinline__ size_t gpu_scalar_index(unsigned int x, unsigned int y)
{
return NX*y+x;
}
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
return (2*RAD + nThreads)*y + x;
}
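// One relaxation sweep of the 2D Poisson problem: each thread stages its node
// plus a RAD-wide halo in shared memory, averages the four neighbours together
// with the charge source term, and records |phi_new - phi_old| in R.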
__global__ void gpu_poisson(double *c, double *fi,double *R){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int s_y = threadIdx.y + RAD;
unsigned int s_x = threadIdx.x + RAD;
unsigned int xp1 = (x + blockDim.x) % NX;
unsigned int yp1 = (y + blockDim.y) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
__shared__ double s_in[(2*RAD + nThreads)*3];
// load to shared memory (regular cells)
s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
// load halo cells
if (threadIdx.x < RAD) {
s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
}
if (threadIdx.y < RAD) {
s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
}
// Boundary conditions
if (y == 0) {
fi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY - 1) {
fi[gpu_scalar_index(x, y)] = 0.0;
return;
}
__syncthreads();
double charge = c[gpu_scalar_index(x, y)];
//double phi = fi[gpu_scalar_index(x, y)];
//double phiL = fi[gpu_scalar_index(xm1, y)];
//double phiR = fi[gpu_scalar_index(xp1, y)];
//double phiU = fi[gpu_scalar_index(x, yp1)];
//double phiD = fi[gpu_scalar_index(x, ym1)];
double phi = s_in[gpu_s_scalar_index(s_x, s_y)];
double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
double source = (charge / eps) * dx *dx; // Right hand side of the equation
double phi_old = phi;
phi = 0.25 * (phiL + phiR + phiU + phiD + source);
// Record the error
R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
//__syncthreads();
fi[gpu_scalar_index(x, y)] = phi;
//if (x == 5 && y == 5) printf("%g\n", phi);
}
|
a7dba43781720ba2f6c246acaaeead1e1fcbf73c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kApplySin.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
hipMalloc(&mat, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
unsigned int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kApplySin), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, target, len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kApplySin), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, target, len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kApplySin), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, target, len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a7dba43781720ba2f6c246acaaeead1e1fcbf73c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kApplySin.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
cudaMalloc(&mat, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
unsigned int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kApplySin<<<gridBlock,threadBlock>>>(mat,target,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kApplySin<<<gridBlock,threadBlock>>>(mat,target,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kApplySin<<<gridBlock,threadBlock>>>(mat,target,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|